Mirror of https://github.com/openappsec/attachment.git (synced 2025-09-30 11:44:29 +03:00)
Istio support (#30)

* adding istio files
* fix the envoy CMakeLists file
* fix the envoy CMakeLists file
* adding the .mod file
* adding the webhook injector image
* adding istio files
* adding istio files
* fix the envoy CMakeLists file
* fix the envoy CMakeLists file
* adding the .mod file
* adding the webhook injector image
* adding istio files
* pulling from dev
* fix the envoy CMakeLists file
* adding istio files
* fix missing header
* fix wrong name of library
* fix envoy CMakeLists
* remove cloud guard names
* remove cloud guard names
* adding istio files
* adding istio files
* [JIRA] INXT-44274: test agent image
* add Daniel fixes
* remove zlib library
* remove nano attachment ut

core/shmem_ipc_2/CMakeLists.txt (new executable file, +6 lines)
@@ -0,0 +1,6 @@
add_library(shmem_ipc_2 SHARED shmem_ipc.c shared_ring_queue.c)

target_link_libraries(shmem_ipc_2 -lrt)

install(TARGETS shmem_ipc_2 DESTINATION lib)
install(TARGETS shmem_ipc_2 DESTINATION nginx_attachment/lib PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ)

core/shmem_ipc_2/shared_ipc_debug.h (new executable file, +55 lines)
@@ -0,0 +1,55 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __SHARED_IPC_DEBUG_H__
#define __SHARED_IPC_DEBUG_H__

typedef struct LoggingData {
    int dbg_level;
    int worker_id;
    int fd;
} LoggingData;

extern void (*debug_int)(
    const LoggingData *loggin_data,
    uint32_t worker_id,
    int is_error,
    const char *func,
    const char *file,
    int line_num,
    const char *fmt,
    ...
);

#ifndef __FILENAME__
#define __FILENAME__ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#endif

enum debugLevel { TraceLevel = 0, DebugLevel = 1, WarningLevel = 3 };

#define writeDebug(logging_data, debug_level, fmt, ...) \
{ \
    debug_int( \
        logging_data, \
        (logging_data)->worker_id, \
        debug_level, \
        __func__, \
        __FILENAME__, \
        __LINE__, \
        fmt, \
        ##__VA_ARGS__ \
    ); \
}

#endif // __SHARED_IPC_DEBUG_H__
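
Note (editorial, not part of the commit): the header above only declares the debug_int hook and the writeDebug macro; the real callback is installed at runtime. A minimal sketch of a consumer follows, assuming it links against the shmem_ipc_2 library (which defines debug_int in shmem_ipc.c) and that the includer supplies <stdint.h>, <string.h>, and <stdarg.h>, which the header itself relies on. The logger name stderrLogger is hypothetical.

// Hypothetical consumer of shared_ipc_debug.h (illustration only).
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "shared_ipc_debug.h"

// A printf-style logger matching the debug_int signature. Note that the
// writeDebug macro passes the debug level through the is_error parameter.
static void
stderrLogger(
    const LoggingData *loggin_data,
    uint32_t worker_id,
    int is_error,
    const char *func,
    const char *file,
    int line_num,
    const char *fmt,
    ...
)
{
    va_list args;
    (void)loggin_data;
    fprintf(stderr, "[worker %u][level %d] %s:%d %s(): ", worker_id, is_error, file, line_num, func);
    va_start(args, fmt);
    vfprintf(stderr, fmt, args);
    va_end(args);
    fprintf(stderr, "\n");
}

int
main(void)
{
    LoggingData logging_data = { .dbg_level = 0, .worker_id = 7, .fd = -1 };
    debug_int = stderrLogger;  // install the hook declared in the header
    writeDebug(&logging_data, TraceLevel, "value is %d", 42);
    return 0;
}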

core/shmem_ipc_2/shared_ring_queue.c (new executable file, +652 lines)
@@ -0,0 +1,652 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "shared_ring_queue.h"

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <ctype.h>
#include <errno.h>

#include "shared_ipc_debug.h"

static const uint16_t empty_buff_mgmt_magic = 0xfffe;
static const uint16_t skip_buff_mgmt_magic = 0xfffd;
static const uint32_t max_write_size = 0xfffc;
const uint16_t max_num_of_data_segments = sizeof(DataSegment)/sizeof(uint16_t);

// LCOV_EXCL_START Reason: Handing it to Envoy prototype development

static int
getNumOfDataSegmentsNeeded(LoggingData *logging_data, uint16_t data_size)
{
    int res = (data_size + SHARED_MEMORY_SEGMENT_ENTRY_SIZE - 1) / SHARED_MEMORY_SEGMENT_ENTRY_SIZE;
    writeDebug(
        logging_data,
        TraceLevel,
        "Checking amount of segments needed. Res: %d, data size: %u, shmem entry size: %u",
        res,
        data_size,
        SHARED_MEMORY_SEGMENT_ENTRY_SIZE
    );
    return res;
}

static int
isThereEnoughMemoryInQueue(
    LoggingData *logging_data,
    SharedRingGlobalData *global_data,
    uint16_t write_pos,
    uint16_t read_pos,
    uint8_t num_of_elem_to_push
)
{
    int res;

    writeDebug(
        logging_data,
        TraceLevel, "Checking if memory has space for new elements. "
        "Num of elements to push: %u, write index: %u, read index: %u, amount of queue segments: %u",
        num_of_elem_to_push,
        write_pos,
        read_pos,
        global_data->g_num_of_data_segments
    );
    if (num_of_elem_to_push >= global_data->g_num_of_data_segments) {
        writeDebug(
            logging_data,
            TraceLevel,
            "Amount of elements to push is larger then amount of available elements in the queue"
        );
        return 0;
    }

    // add skipped elements during write that does not fit from cur write position till end of queue
    if (write_pos + num_of_elem_to_push > global_data->g_num_of_data_segments) {
        num_of_elem_to_push += global_data->g_num_of_data_segments - write_pos;
    }

    // removing the aspect of circularity in queue and simulating as if the queue continued at its end
    if (write_pos + num_of_elem_to_push >= global_data->g_num_of_data_segments) {
        read_pos += global_data->g_num_of_data_segments;
    }

    res = write_pos + num_of_elem_to_push < read_pos || write_pos >= read_pos;
    writeDebug(logging_data, TraceLevel, "Finished checking if there is enough place in shared memory. Res: %d", res);
    return res;
}

static int
isGetPossitionSucceccful(
    SharedRingQueue *queue,
    SharedRingGlobalData *global_data,
    uint16_t *read_pos,
    uint16_t *write_pos
)
{
    if (global_data->g_num_of_data_segments == 0) return 0;

    *read_pos = queue->read_pos;
    *write_pos = queue->write_pos;

    if (queue->num_of_data_segments != global_data->g_num_of_data_segments) return 0;
    if (queue->size_of_memory != global_data->g_memory_size) return 0;
    if (*read_pos > global_data->g_num_of_data_segments) return 0;
    if (*write_pos > global_data->g_num_of_data_segments) return 0;

    return 1;
}

void
resetRingQueue(LoggingData *logging_data, SharedRingQueue *queue, uint16_t num_of_data_segments)
{
    (void)logging_data;
    uint16_t *buffer_mgmt;
    unsigned int idx;

    queue->read_pos = 0;
    queue->write_pos = 0;
    queue->num_of_data_segments = num_of_data_segments;
    buffer_mgmt = (uint16_t *)queue->mgmt_segment.data;
    for (idx = 0; idx < queue->num_of_data_segments; idx++) {
        buffer_mgmt[idx] = empty_buff_mgmt_magic;
    }
}

SharedRingGlobalData *
createSharedRingGlobalData(LoggingData *logging_data)
{
    SharedRingGlobalData *global_data = (SharedRingGlobalData *)malloc(sizeof(SharedRingGlobalData));
    if (global_data == NULL) {
        writeDebug(logging_data, WarningLevel, "Failed to allocate memory for global data\n");
        return NULL;
    }

    global_data->g_rx_fd = -1;
    global_data->g_tx_fd = -1;
    global_data->g_memory_size = -1;
    global_data->g_rx_location_name[0] = '\0';
    global_data->g_tx_location_name[0] = '\0';
    global_data->g_num_of_data_segments = 0;

    return global_data;
}

SharedRingQueue *
createSharedRingQueue(
    LoggingData *logging_data,
    const char *shared_location_name,
    uint16_t num_of_data_segments,
    int is_owner,
    int is_tx,
    SharedRingGlobalData *global_data
)
{
    SharedRingQueue *queue = NULL;
    uint16_t *buffer_mgmt;
    uint16_t shmem_fd_flags = is_owner ? O_RDWR | O_CREAT : O_RDWR;
    int32_t fd = -1;
    uint32_t size_of_memory;
    unsigned int idx;

    writeDebug(logging_data, TraceLevel, "Creating a new shared ring queue");

    if (num_of_data_segments > max_num_of_data_segments) {
        writeDebug(
            logging_data,
            WarningLevel,
            "createSharedRingQueue: Cannot create data segment with %d elements (max number of elements is %u)\n",
            num_of_data_segments,
            max_num_of_data_segments
        );
        return NULL;
    }

    global_data->g_num_of_data_segments = num_of_data_segments;

    fd = shm_open(shared_location_name, shmem_fd_flags, S_IRWXU | S_IRWXG | S_IRWXO);
    if (fd == -1) {
        writeDebug(
            logging_data,
            WarningLevel,
            "createSharedRingQueue: Failed to open shared memory for '%s'. Errno: %d\n",
            shared_location_name,
            errno
        );
        return NULL;
    }

    size_of_memory = sizeof(SharedRingQueue) + (num_of_data_segments * sizeof(DataSegment));
    if (is_owner && ftruncate(fd, size_of_memory + 1) != 0) {
        writeDebug(
            logging_data,
            WarningLevel,
            "createSharedRingQueue: Failed to ftruncate shared memory '%s' to size '%x'\n",
            shared_location_name,
            size_of_memory
        );
        close(fd);
        return NULL;
    }

    queue = (SharedRingQueue *)mmap(0, size_of_memory, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (queue == NULL) {
        writeDebug(
            logging_data,
            WarningLevel,
            "createSharedRingQueue: Error allocating queue for '%s' of size=%x\n",
            shared_location_name,
            size_of_memory
        );
        close(fd);
        return NULL;
    }

    if (is_owner) {
        snprintf(queue->shared_location_name, MAX_ONE_WAY_QUEUE_NAME_LENGTH, "%s", shared_location_name);
        queue->num_of_data_segments = num_of_data_segments;
        queue->read_pos = 0;
        queue->write_pos = 0;
        queue->size_of_memory = size_of_memory;
        buffer_mgmt = (uint16_t *)queue->mgmt_segment.data;
        for (idx = 0; idx < queue->num_of_data_segments; idx++) {
            buffer_mgmt[idx] = empty_buff_mgmt_magic;
        }
        queue->owner_fd = fd;
    } else {
        queue->user_fd = fd;
    }

    global_data->g_memory_size = size_of_memory;
    if (is_tx) {
        global_data->g_tx_fd = fd;
        snprintf(global_data->g_tx_location_name, MAX_ONE_WAY_QUEUE_NAME_LENGTH, "%s", shared_location_name);
    } else {
        global_data->g_rx_fd = fd;
        snprintf(global_data->g_rx_location_name, MAX_ONE_WAY_QUEUE_NAME_LENGTH, "%s", shared_location_name);
    }

    writeDebug(
        logging_data,
        TraceLevel,
        "Successfully created a new shared ring queue. "
        "Shared memory path: %s, number of segments: %u, is owner: %d, "
        "fd flags: %u, fd: %d, memory size: %u, read index: %u, write index: %u",
        shared_location_name,
        queue->num_of_data_segments,
        is_owner,
        shmem_fd_flags,
        fd,
        queue->size_of_memory,
        queue->read_pos,
        queue->write_pos
    );

    return queue;
}

void
destroySharedRingQueue(
    LoggingData *logging_data,
    SharedRingQueue *queue,
    SharedRingGlobalData *global_data,
    int is_owner,
    int is_tx
)
{
    uint32_t size_of_memory = global_data->g_memory_size;
    int32_t fd = 0;

    if(is_owner) {
        queue->owner_fd = 0;
    } else {
        queue->user_fd = 0;
    }

    if (is_tx) {
        fd = global_data->g_tx_fd;
        global_data->g_tx_fd = -1;
    } else {
        fd = global_data->g_rx_fd;
        global_data->g_rx_fd = -1;
    }

    if (munmap(queue, size_of_memory) != 0) {
        writeDebug(logging_data, WarningLevel, "destroySharedRingQueue: Failed to unmap shared ring queue\n");
    }
    if (fd > 0) close(fd);
    fd = 0;

    // shm_open cleanup
    if(is_owner) {
        shm_unlink(is_tx ? global_data->g_tx_location_name : global_data->g_rx_location_name);
    }
    writeDebug(logging_data, TraceLevel, "Successfully destroyed shared ring queue. Is owner: %d", is_owner);
}

void
dumpRingQueueShmem(LoggingData *logging_data, SharedRingQueue *queue)
{
    uint16_t segment_idx;
    uint16_t data_idx;
    uint16_t *buffer_mgmt = NULL;
    char data_byte;

    writeDebug(
        logging_data,
        WarningLevel,
        "owner_fd: %d, user_fd: %d, size_of_memory: %d, write_pos: %d, read_pos: %d, num_of_data_segments: %d\n",
        queue->owner_fd,
        queue->user_fd,
        queue->size_of_memory,
        queue->write_pos,
        queue->read_pos,
        queue->num_of_data_segments
    );

    writeDebug(logging_data, WarningLevel, "mgmt_segment:");
    buffer_mgmt = (uint16_t *)queue->mgmt_segment.data;
    for (segment_idx = 0; segment_idx < queue->num_of_data_segments; segment_idx++) {
        writeDebug(logging_data, WarningLevel, "%s%u", (segment_idx == 0 ? " " : ", "), buffer_mgmt[segment_idx]);
    }

    writeDebug(logging_data, WarningLevel, "\ndata_segment: ");
    for (segment_idx = 0; segment_idx < queue->num_of_data_segments; segment_idx++) {
        writeDebug(
            logging_data,
            WarningLevel,
            "\nMgmt index: %u, value: %u,\nactual data: ",
            segment_idx,
            buffer_mgmt[segment_idx]
        );
        for (data_idx = 0; data_idx < SHARED_MEMORY_SEGMENT_ENTRY_SIZE; data_idx++) {
            data_byte = queue->data_segment[segment_idx].data[data_idx];
            writeDebug(logging_data, WarningLevel, isprint(data_byte) ? "%c" : "%02X", data_byte);
        }
    }
    writeDebug(logging_data, WarningLevel, "\nEnd of memory\n");
}

int
peekToQueue(
    LoggingData *logging_data,
    SharedRingQueue *queue,
    SharedRingGlobalData *global_data,
    const char **output_buffer,
    uint16_t *output_buffer_size
)
{
    uint16_t read_pos;
    uint16_t write_pos;
    uint16_t *buffer_mgmt = (uint16_t *)queue->mgmt_segment.data;

    if (!isGetPossitionSucceccful(queue, global_data, &read_pos, &write_pos)) {
        writeDebug(logging_data, WarningLevel, "Corrupted shared memory - cannot peek");
        return -1;
    }

    writeDebug(
        logging_data,
        TraceLevel,
        "Reading data from queue. Read index: %u, number of queue elements: %u",
        read_pos,
        global_data->g_num_of_data_segments
    );

    if (read_pos == write_pos) {
        writeDebug(logging_data, WarningLevel, "peekToQueue: Failed to read from an empty queue\n");
        return -1;
    }

    if (read_pos >= global_data->g_num_of_data_segments) {
        writeDebug(
            logging_data,
            WarningLevel,
            "peekToQueue: Failed to read from a corrupted queue! (read_pos= %d > num_of_data_segments=%d)\n",
            read_pos,
            global_data->g_num_of_data_segments
        );
        return CORRUPTED_SHMEM_ERROR;
    }

    if (buffer_mgmt[read_pos] == skip_buff_mgmt_magic) {
        for ( ; read_pos < global_data->g_num_of_data_segments &&
                buffer_mgmt[read_pos] == skip_buff_mgmt_magic;
                ++read_pos) {
            buffer_mgmt[read_pos] = empty_buff_mgmt_magic;
        }
    }

    if (read_pos == global_data->g_num_of_data_segments) read_pos = 0;

    *output_buffer_size = buffer_mgmt[read_pos];
    *output_buffer = queue->data_segment[read_pos].data;

    queue->read_pos = read_pos;

    writeDebug(
        logging_data,
        TraceLevel,
        "Successfully read data from queue. Data size: %u, new Read index: %u",
        *output_buffer_size,
        queue->read_pos
    );
    return 0;
}

int
pushBuffersToQueue(
    LoggingData *logging_data,
    SharedRingQueue *queue,
    SharedRingGlobalData *global_data,
    const char **input_buffers,
    const uint16_t *input_buffers_sizes,
    const uint8_t num_of_input_buffers
)
{
    int idx;
    uint32_t large_total_elem_size = 0;
    uint16_t read_pos;
    uint16_t write_pos;
    uint16_t total_elem_size;
    uint16_t *buffer_mgmt = (uint16_t *)queue->mgmt_segment.data;
    uint16_t end_pos;
    uint16_t num_of_segments_to_write;
    char *current_copy_pos;

    if (!isGetPossitionSucceccful(queue, global_data, &read_pos, &write_pos)) {
        writeDebug(logging_data, WarningLevel, "Corrupted shared memory - cannot push new buffers");
        return -1;
    }

    writeDebug(
        logging_data,
        TraceLevel,
        "Writing new data to queue. write index: %u, number of queue elements: %u, number of elements to push: %u",
        write_pos,
        global_data->g_num_of_data_segments,
        num_of_input_buffers
    );

    for (idx = 0; idx < num_of_input_buffers; idx++) {
        large_total_elem_size += input_buffers_sizes[idx];

        if (large_total_elem_size > max_write_size) {
            writeDebug(
                logging_data,
                WarningLevel,
                "Requested write size %u exceeds the %u write limit",
                large_total_elem_size,
                max_write_size
            );
            return -2;
        }
    }
    total_elem_size = (uint16_t)large_total_elem_size;

    num_of_segments_to_write = getNumOfDataSegmentsNeeded(logging_data, total_elem_size);

    writeDebug(
        logging_data,
        TraceLevel,
        "Checking if there is enough space to push new data. Total new data size: %u, number of segments needed: %u",
        total_elem_size,
        num_of_segments_to_write
    );


    if (!isThereEnoughMemoryInQueue(logging_data, global_data, write_pos, read_pos, num_of_segments_to_write)) {
        writeDebug(logging_data, DebugLevel, "Cannot write to a full queue");
        return -3;
    }

    if (write_pos >= global_data->g_num_of_data_segments) {
        writeDebug(
            logging_data,
            DebugLevel,
            "Cannot write to a location outside the queue. Write index: %u, number of queue elements: %u",
            write_pos,
            global_data->g_num_of_data_segments
        );
        return -4;
    }

    if (write_pos + num_of_segments_to_write > global_data->g_num_of_data_segments) {
        for ( ; write_pos < global_data->g_num_of_data_segments; ++write_pos) {
            buffer_mgmt[write_pos] = skip_buff_mgmt_magic;
        }
        write_pos = 0;
    }

    writeDebug(
        logging_data,
        TraceLevel,
        "Setting new management data. Write index: %u, total elements in index: %u",
        write_pos,
        total_elem_size
    );

    buffer_mgmt[write_pos] = total_elem_size;
    current_copy_pos = queue->data_segment[write_pos].data;
    for (idx = 0; idx < num_of_input_buffers; idx++) {
        writeDebug(
            logging_data,
            TraceLevel,
            "Writing data to queue. Data index: %u, data size: %u, copy destination: %p",
            idx,
            input_buffers_sizes[idx],
            current_copy_pos
        );
        memcpy(current_copy_pos, input_buffers[idx], input_buffers_sizes[idx]);
        current_copy_pos += input_buffers_sizes[idx];
    }
    write_pos++;

    end_pos = write_pos + num_of_segments_to_write - 1;
    for ( ; write_pos < end_pos; ++write_pos) {
        buffer_mgmt[write_pos] = skip_buff_mgmt_magic;
    }

    if (write_pos >= global_data->g_num_of_data_segments) write_pos = 0;
    queue->write_pos = write_pos;
    writeDebug(logging_data, TraceLevel, "Successfully pushed data to queue. New write index: %u", write_pos);

    return 0;
}

int
pushToQueue(
    LoggingData *logging_data,
    SharedRingQueue *queue,
    SharedRingGlobalData *global_data,
    const char *input_buffer,
    const uint16_t input_buffer_size
)
{
    return pushBuffersToQueue(logging_data, queue, global_data, &input_buffer, &input_buffer_size, 1);
}

int
popFromQueue(LoggingData *logging_data, SharedRingQueue *queue, SharedRingGlobalData *global_data)
{
    uint16_t num_of_read_segments;
    uint16_t read_pos;
    uint16_t write_pos;
    uint16_t end_pos;
    uint16_t *buffer_mgmt = (uint16_t *)queue->mgmt_segment.data;

    if (!isGetPossitionSucceccful(queue, global_data, &read_pos, &write_pos)) {
        writeDebug(logging_data, WarningLevel, "Corrupted shared memory - cannot pop data");
        return -1;
    }

    writeDebug(
        logging_data,
        TraceLevel,
        "Removing data from queue. new data to queue. Read index: %u, number of queue elements: %u",
        read_pos,
        global_data->g_num_of_data_segments
    );

    if (read_pos == write_pos) {
        writeDebug(logging_data, TraceLevel, "Cannot pop data from empty queue");
        return -1;
    }
    num_of_read_segments = getNumOfDataSegmentsNeeded(logging_data, buffer_mgmt[read_pos]);

    if (read_pos + num_of_read_segments > global_data->g_num_of_data_segments) {
        for ( ; read_pos < global_data->g_num_of_data_segments; ++read_pos ) {
            buffer_mgmt[read_pos] = empty_buff_mgmt_magic;
        }
        read_pos = 0;
    }

    end_pos = read_pos + num_of_read_segments;

    for ( ; read_pos < end_pos; ++read_pos ) {
        buffer_mgmt[read_pos] = empty_buff_mgmt_magic;
    }

    if (read_pos < global_data->g_num_of_data_segments && buffer_mgmt[read_pos] == skip_buff_mgmt_magic) {
        for ( ; read_pos < global_data->g_num_of_data_segments; ++read_pos ) {
            buffer_mgmt[read_pos] = empty_buff_mgmt_magic;
        }
    }

    writeDebug(
        logging_data,
        TraceLevel,
        "Size of data to remove: %u, number of queue elements to free: %u, current read index: %u, end index: %u",
        buffer_mgmt[read_pos],
        num_of_read_segments,
        read_pos,
        end_pos
    );

    if (read_pos == global_data->g_num_of_data_segments) read_pos = 0;

    queue->read_pos = read_pos;
    writeDebug(logging_data, TraceLevel, "Successfully popped data from queue. New read index: %u", read_pos);

    return 0;
}

int
isQueueEmpty(SharedRingQueue *queue)
{
    return queue->read_pos == queue->write_pos;
}

int
isCorruptedQueue(LoggingData *logging_data, SharedRingQueue *queue, SharedRingGlobalData *global_data, int is_tx)
{
    writeDebug(
        logging_data,
        TraceLevel,
        "Checking if shared ring queue is corrupted. "
        "g_num_of_data_segments = %u, queue->num_of_data_segments = %u, queue->read_pos = %u, queue->write_pos = %u, "
        "g_memory_size = %d, queue->size_of_memory = %d, "
        "queue->shared_location_name = %s, g_tx_location_name = %s, g_rx_location_name = %s, is_tx = %d",
        global_data->g_num_of_data_segments,
        queue->num_of_data_segments,
        queue->read_pos,
        queue->write_pos,
        global_data->g_memory_size,
        queue->size_of_memory,
        queue->shared_location_name,
        global_data->g_tx_location_name,
        global_data->g_rx_location_name,
        is_tx
    );

    if (global_data->g_num_of_data_segments == 0) return 0;

    if (queue->num_of_data_segments != global_data->g_num_of_data_segments) return 1;
    if (queue->size_of_memory != global_data->g_memory_size) return 1;
    if (queue->read_pos > global_data->g_num_of_data_segments) return 1;
    if (queue->write_pos > global_data->g_num_of_data_segments) return 1;
    if (strcmp(
            queue->shared_location_name,
            is_tx ? global_data->g_tx_location_name : global_data->g_rx_location_name
        ) != 0
    ) return 1;

    return 0;
}
// LCOV_EXCL_STOP
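
Note (editorial, not part of the commit): in the ring queue above, a message occupies a run of contiguous 1024-byte segments; the mgmt slot of the first segment stores the total payload size, trailing segments in the run are marked with skip_buff_mgmt_magic (0xfffd), and freed slots return to empty_buff_mgmt_magic (0xfffe). Because writes are capped at max_write_size (0xfffc), a stored size can never collide with the magic markers. A minimal standalone sketch of the same ceiling division used by getNumOfDataSegmentsNeeded follows; the constant and function names here are illustrative only.

// Illustration of the segment accounting (assumes the same 1024-byte entry size).
#include <stdint.h>
#include <stdio.h>

#define SEGMENT_SIZE 1024  /* mirrors SHARED_MEMORY_SEGMENT_ENTRY_SIZE */

static int
segmentsNeeded(uint16_t data_size)
{
    // Same ceiling division as getNumOfDataSegmentsNeeded().
    return (data_size + SEGMENT_SIZE - 1) / SEGMENT_SIZE;
}

int
main(void)
{
    printf("%d\n", segmentsNeeded(1));     // 1 segment
    printf("%d\n", segmentsNeeded(1024));  // 1 segment (exact fit)
    printf("%d\n", segmentsNeeded(1025));  // 2 segments
    return 0;
}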

core/shmem_ipc_2/shared_ring_queue.h (new executable file, +114 lines)
@@ -0,0 +1,114 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __SHARED_RING_QUEUE_H__
#define __SHARED_RING_QUEUE_H__

#include <stdint.h>
#include <stdio.h>

#ifdef __cplusplus
extern "C"
{
#endif // __cplusplus

#define SHARED_MEMORY_SEGMENT_ENTRY_SIZE 1024
#define MAX_ONE_WAY_QUEUE_NAME_LENGTH 64
#define CORRUPTED_SHMEM_ERROR -2

typedef struct LoggingData LoggingData;

typedef struct SharedRingGlobalData {
    char g_rx_location_name[MAX_ONE_WAY_QUEUE_NAME_LENGTH];
    char g_tx_location_name[MAX_ONE_WAY_QUEUE_NAME_LENGTH];
    int32_t g_rx_fd;
    int32_t g_tx_fd;
    int32_t g_memory_size;
    uint16_t g_num_of_data_segments;
} SharedRingGlobalData;

typedef struct DataSegment {
    char data[SHARED_MEMORY_SEGMENT_ENTRY_SIZE];
} DataSegment;

typedef struct __attribute__((__packed__)) SharedRingQueue {
    char shared_location_name[MAX_ONE_WAY_QUEUE_NAME_LENGTH];
    int32_t owner_fd;
    int32_t user_fd;
    int32_t size_of_memory;
    uint16_t write_pos;
    uint16_t read_pos;
    uint16_t num_of_data_segments;
    DataSegment mgmt_segment;
    DataSegment data_segment[0];
} SharedRingQueue;

SharedRingQueue *
createSharedRingQueue(
    LoggingData *logging_data,
    const char *shared_location_name,
    uint16_t num_of_data_segments,
    int is_owner,
    int is_tx,
    SharedRingGlobalData *global_data
);

SharedRingGlobalData * createSharedRingGlobalData(LoggingData *logging_data);

void destroySharedRingQueue(
    LoggingData *logging_data,
    SharedRingQueue *queue,
    SharedRingGlobalData *global_data,
    int is_owner,
    int is_tx
);
int isQueueEmpty(SharedRingQueue *queue);
int isCorruptedQueue(LoggingData *logging_data, SharedRingQueue *queue, SharedRingGlobalData *global_data, int is_tx);

int peekToQueue(
    LoggingData *logging_data,
    SharedRingQueue *queue,
    SharedRingGlobalData *global_data,
    const char **output_buffer,
    uint16_t *output_buffer_size
);

int popFromQueue(LoggingData *logging_data, SharedRingQueue *queue, SharedRingGlobalData *global_data);

int pushToQueue(
    LoggingData *logging_data,
    SharedRingQueue *queue,
    SharedRingGlobalData *global_data,
    const char *input_buffer,
    const uint16_t input_buffer_size
);

void resetRingQueue(LoggingData *logging_data, SharedRingQueue *queue, uint16_t num_of_data_segments);
void dumpRingQueueShmem(LoggingData *logging_data, SharedRingQueue *queue);

int
pushBuffersToQueue(
    LoggingData *logging_data,
    SharedRingQueue *queue,
    SharedRingGlobalData *global_data,
    const char **input_buffers,
    const uint16_t *input_buffers_sizes,
    const uint8_t num_of_input_buffers
);

#ifdef __cplusplus
}
#endif // __cplusplus

#endif // __SHARED_RING_QUEUE_H__
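
Note (editorial, not part of the commit): a rough single-process sketch of the raw ring-queue API declared above, in which the owner creates a queue, pushes one message, and reads it back from the same queue. It assumes linking against the shmem_ipc_2 library (which supplies these functions and a no-op debug_int); the queue name and segment count are illustrative only, and in the real attachment the tx and rx queues are distinct shared-memory objects used by two processes.

// Single-process illustration of the shared_ring_queue API (assumptions noted above).
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "shared_ipc_debug.h"    // full LoggingData definition
#include "shared_ring_queue.h"

int
main(void)
{
    LoggingData logging = { .dbg_level = 0, .worker_id = 0, .fd = -1 };
    SharedRingGlobalData *global = createSharedRingGlobalData(&logging);
    if (global == NULL) return 1;

    // Owner side creates the shared memory object: 16 segments of 1024 bytes each.
    SharedRingQueue *queue = createSharedRingQueue(&logging, "__example_ring__", 16, 1, 1, global);
    if (queue == NULL) return 1;

    const char *msg = "hello";
    if (pushToQueue(&logging, queue, global, msg, (uint16_t)strlen(msg)) != 0) return 1;

    const char *out = NULL;
    uint16_t out_size = 0;
    if (peekToQueue(&logging, queue, global, &out, &out_size) == 0) {
        printf("peeked %u bytes: %.*s\n", out_size, (int)out_size, out);
        popFromQueue(&logging, queue, global);  // release the segments after consuming the data
    }

    destroySharedRingQueue(&logging, queue, global, 1, 1);
    free(global);
    return 0;
}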

core/shmem_ipc_2/shmem_ipc.c (new executable file, +419 lines)
@@ -0,0 +1,419 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "shmem_ipc_2.h"

#include <stdlib.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>
#include <stdarg.h>

#include "shared_ring_queue.h"
#include "shared_ipc_debug.h"

#define UNUSED(x) (void)(x)

const int corrupted_shmem_error = CORRUPTED_SHMEM_ERROR;
static const size_t max_one_way_queue_name_length = MAX_ONE_WAY_QUEUE_NAME_LENGTH;
static const size_t max_shmem_path_length = 72;

struct SharedMemoryIPC {
    char shm_name[32];
    SharedRingQueue *rx_queue;
    SharedRingQueue *tx_queue;
    SharedRingGlobalData *global_data;
    LoggingData logging_data;
};

// LCOV_EXCL_START Reason: Handing it to Envoy prototype development

void
debugInitial(
    const LoggingData *loggin_data,
    uint32_t worker_id,
    int is_error,
    const char *func,
    const char *file,
    int line_num,
    const char *fmt,
    ...
)
{
    UNUSED(is_error);
    UNUSED(func);
    UNUSED(file);
    UNUSED(line_num);
    UNUSED(loggin_data);
    UNUSED(worker_id);

    // Temporarily disabled till Shmem debugging is properly fixed.
    // va_list args;
    // va_start(args, fmt);
    // vprintf(fmt, args);
    // va_end(args);

    UNUSED(fmt);
}

void (*debug_int)(
    const LoggingData *loggin_data,
    uint32_t worker_id,
    int is_error,
    const char *func,
    const char *file,
    int line_num,
    const char *fmt,
    ...
) = debugInitial;


static int
isTowardsOwner(int is_owner, int is_tx)
{
    if (is_owner) return !is_tx;
    return is_tx;
}

static SharedRingQueue *
createOneWayIPCQueue(
    LoggingData *logging_data,
    const char *name,
    const uint32_t user_id,
    const uint32_t group_id,
    int is_tx_queue,
    int is_owner,
    uint16_t num_of_queue_elem,
    SharedRingGlobalData *global_data
)
{
    SharedRingQueue *ring_queue = NULL;
    char queue_name[max_one_way_queue_name_length];
    char shmem_path[max_shmem_path_length];
    const char *direction = isTowardsOwner(is_owner, is_tx_queue) ? "rx" : "tx";
    snprintf(queue_name, sizeof(queue_name) - 1, "__cp_nano_%s_shared_memory_%s__", direction, name);

    writeDebug(
        logging_data,
        TraceLevel,
        "Creating one way IPC queue. Name: %s, direction: %s, size: %d",
        name,
        direction,
        num_of_queue_elem
    );
    ring_queue = createSharedRingQueue(
        logging_data,
        queue_name,
        num_of_queue_elem,
        is_owner,
        isTowardsOwner(is_owner, is_tx_queue),
        global_data
    );
    if (ring_queue == NULL) {
        writeDebug(
            logging_data,
            WarningLevel,
            "Failed to create %s shared ring queue of size=%d for '%s'\n",
            direction,
            num_of_queue_elem,
            queue_name
        );
        return NULL;
    }
    int ret = snprintf(shmem_path, sizeof(shmem_path) - 1, "/dev/shm/%s", queue_name);
    if (ret < 0 || (size_t)ret < (strlen(direction) + strlen(name))) {
        return NULL;
    }

    if (is_owner && chmod(shmem_path, 0666) == -1) {
        writeDebug(logging_data, WarningLevel, "Failed to set the permissions");
        destroySharedRingQueue(logging_data, ring_queue, global_data, is_owner, isTowardsOwner(is_owner, is_tx_queue));
        return NULL;
    }

    writeDebug(
        logging_data,
        TraceLevel,
        "Successfully created one way IPC queue. "
        "Name: %s, user id: %u, group id: %u, is owner: %d, number of queue elements: %u, direction: %s, path: %s",
        queue_name,
        user_id,
        group_id,
        is_owner,
        num_of_queue_elem,
        direction,
        shmem_path
    );
    return ring_queue;
}

LoggingData *
initLoggingData(int dbg_level, int worker_id, int fd)
{
    LoggingData *logging_data = malloc(sizeof(LoggingData));
    if (logging_data == NULL) {
        return NULL;
    }
    logging_data->dbg_level = dbg_level;
    logging_data->worker_id = worker_id;
    logging_data->fd = fd;
    return logging_data;
}

SharedMemoryIPC *
initIpc(
    const char queue_name[32],
    uint32_t user_id,
    uint32_t group_id,
    int is_owner,
    uint16_t num_of_queue_elem,
    const LoggingData *logging_data,
    void (*debug_func)(
        const LoggingData *loggin_data,
        uint32_t worker_id,
        int is_error,
        const char *func,
        const char *file,
        int line_num,
        const char *fmt,
        ...
    )
)
{
    UNUSED(debug_func);
    SharedMemoryIPC *ipc = NULL;
    // debug_int = debug_func;
    debug_int = debugInitial;

    writeDebug(
        logging_data,
        TraceLevel,
        "Initializing new IPC. "
        "Queue name: %s, user id: %u, group id: %u, is owner: %d, number of queue elements: %u\n",
        queue_name,
        user_id,
        group_id,
        is_owner,
        num_of_queue_elem
    );

    ipc = malloc(sizeof(SharedMemoryIPC));
    if (ipc == NULL) {
        writeDebug(logging_data, WarningLevel, "Failed to allocate Shared Memory IPC for '%s'\n", queue_name);
        debug_int = debugInitial;
        return NULL;
    }

    ipc->logging_data.dbg_level = logging_data->dbg_level;
    ipc->logging_data.worker_id = logging_data->worker_id;
    ipc->logging_data.fd = logging_data->fd;

    ipc->global_data = createSharedRingGlobalData(&(ipc->logging_data));
    if (ipc->global_data == NULL) {
        writeDebug(logging_data, WarningLevel, "Failed to allocate global data for '%s'\n", queue_name);
        debug_int = debugInitial;
        free(ipc);
        return NULL;
    }

    ipc->rx_queue = NULL;
    ipc->tx_queue = NULL;

    ipc->rx_queue = createOneWayIPCQueue(
        &(ipc->logging_data),
        queue_name,
        user_id,
        group_id,
        0,
        is_owner,
        num_of_queue_elem,
        ipc->global_data
    );
    if (ipc->rx_queue == NULL) {
        writeDebug(
            &(ipc->logging_data),
            WarningLevel,
            "Failed to allocate rx queue. "
            "Queue name: %s, user id: %u, group id: %u, is owner: %d, number of queue elements: %u",
            queue_name,
            user_id,
            group_id,
            is_owner,
            num_of_queue_elem
        );

        destroyIpc(ipc, is_owner);
        debug_int = debugInitial;
        return NULL;
    }

    ipc->tx_queue = createOneWayIPCQueue(
        &(ipc->logging_data),
        queue_name,
        user_id,
        group_id,
        1,
        is_owner,
        num_of_queue_elem,
        ipc->global_data
    );
    if (ipc->tx_queue == NULL) {
        writeDebug(
            &(ipc->logging_data),
            WarningLevel,
            "Failed to allocate rx queue. "
            "Queue name: %s, user id: %u, group id: %u, is owner: %d, number of queue elements: %u",
            queue_name,
            user_id,
            group_id,
            is_owner,
            num_of_queue_elem
        );
        destroyIpc(ipc, is_owner);
        debug_int = debugInitial;
        return NULL;
    }

    writeDebug(&(ipc->logging_data), TraceLevel, "Successfully allocated IPC");

    strncpy(ipc->shm_name, queue_name, sizeof(ipc->shm_name));
    return ipc;
}

void
resetIpc(SharedMemoryIPC *ipc, uint16_t num_of_data_segments)
{
    writeDebug(&(ipc->logging_data), TraceLevel, "Reseting IPC queues\n");
    resetRingQueue(&(ipc->logging_data), ipc->rx_queue, num_of_data_segments);
    resetRingQueue(&(ipc->logging_data), ipc->tx_queue, num_of_data_segments);
}

void
destroyIpc(SharedMemoryIPC *shmem, int is_owner)
{
    writeDebug(&(shmem->logging_data), TraceLevel, "Destroying IPC queues\n");

    if (shmem->rx_queue != NULL) {
        destroySharedRingQueue(
            &(shmem->logging_data),
            shmem->rx_queue,
            shmem->global_data,
            is_owner,
            isTowardsOwner(is_owner, 0)
        );
        shmem->rx_queue = NULL;
    }
    if (shmem->tx_queue != NULL) {
        destroySharedRingQueue(
            &(shmem->logging_data),
            shmem->tx_queue,
            shmem->global_data,
            is_owner,
            isTowardsOwner(is_owner, 1)
        );
        shmem->tx_queue = NULL;
    }
    free(shmem->global_data);
    debug_int = debugInitial;
    free(shmem);
}

void
dumpIpcMemory(SharedMemoryIPC *ipc)
{
    writeDebug(&(ipc->logging_data), WarningLevel, "Ipc memory dump:\n");
    writeDebug(&(ipc->logging_data), WarningLevel, "RX queue:\n");
    dumpRingQueueShmem(&(ipc->logging_data), ipc->rx_queue);
    writeDebug(&(ipc->logging_data), WarningLevel, "TX queue:\n");
    dumpRingQueueShmem(&(ipc->logging_data), ipc->tx_queue);
}

int
sendData(SharedMemoryIPC *ipc, const uint16_t data_to_send_size, const char *data_to_send)
{
    writeDebug(&(ipc->logging_data), TraceLevel, "Sending data of size %u\n", data_to_send_size);
    return pushToQueue(&(ipc->logging_data), ipc->tx_queue, ipc->global_data, data_to_send, data_to_send_size);
}

int
sendChunkedData(
    SharedMemoryIPC *ipc,
    const uint16_t *data_to_send_sizes,
    const char **data_elem_to_send,
    const uint8_t num_of_data_elem
)
{
    writeDebug(&(ipc->logging_data), TraceLevel, "Sending %u chunks of data\n", num_of_data_elem);

    return pushBuffersToQueue(
        &(ipc->logging_data),
        ipc->tx_queue,
        ipc->global_data,
        data_elem_to_send,
        data_to_send_sizes,
        num_of_data_elem
    );
}

int
receiveData(SharedMemoryIPC *ipc, uint16_t *received_data_size, const char **received_data)
{
    int res = peekToQueue(&(ipc->logging_data), ipc->rx_queue, ipc->global_data, received_data, received_data_size);
    writeDebug(
        &(ipc->logging_data),
        TraceLevel,
        "Received data from queue. Res: %d, data size: %u\n",
        res,
        *received_data_size
    );
    return res;
}

int
popData(SharedMemoryIPC *ipc)
{
    int res = popFromQueue(&(ipc->logging_data), ipc->rx_queue, ipc->global_data);
    writeDebug(&(ipc->logging_data), TraceLevel, "Popped data from queue. Res: %d\n", res);
    return res;
}

int
isDataAvailable(SharedMemoryIPC *ipc)
{
    int res = !isQueueEmpty(ipc->rx_queue);
    writeDebug(&(ipc->logging_data), TraceLevel, "Checking if there is data pending to be read. Res: %d\n", res);
    return res;
}

int
isCorruptedShmem(SharedMemoryIPC *ipc, int is_owner)
{
    if (isCorruptedQueue(&(ipc->logging_data), ipc->rx_queue, ipc->global_data, isTowardsOwner(is_owner, 0)) ||
        isCorruptedQueue(&(ipc->logging_data), ipc->tx_queue, ipc->global_data, isTowardsOwner(is_owner, 1))
    ) {
        writeDebug(
            &(ipc->logging_data),
            WarningLevel,
            "Detected corrupted shared memory queue. Shared memory name: %s",
            ipc->shm_name
        );
        return 1;
    }

    return 0;
}
// LCOV_EXCL_STOP
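
Note (editorial, not part of the commit): a hedged sketch of the owner side of the higher-level IPC API implemented above. It assumes the prototypes come from shmem_ipc_2.h, which shmem_ipc.c includes but which is not shown in this excerpt, and that the program links against the shmem_ipc_2 library; the queue name "example_ipc" and the element count are illustrative only. The peer process would call initIpc with the same name and is_owner = 0.

// Owner-side illustration of the SharedMemoryIPC API (assumptions noted above).
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "shmem_ipc_2.h"

int
main(void)
{
    LoggingData *logging = initLoggingData(0 /* dbg_level */, 0 /* worker_id */, -1 /* fd */);
    if (logging == NULL) return 1;

    // The owner creates both shared-memory queues; passing NULL keeps the no-op debug hook.
    SharedMemoryIPC *ipc = initIpc("example_ipc", getuid(), getgid(), 1, 16, logging, NULL);
    if (ipc == NULL) return 1;

    const char *msg = "ping";
    if (sendData(ipc, (uint16_t)strlen(msg), msg) != 0) {
        fprintf(stderr, "tx queue full or corrupted\n");
    }

    // Drain whatever the peer has pushed onto our rx queue.
    while (isDataAvailable(ipc)) {
        const char *data = NULL;
        uint16_t size = 0;
        if (receiveData(ipc, &size, &data) != 0) break;
        printf("received %u bytes\n", size);
        popData(ipc);  // free the segments only after the data has been consumed
    }

    destroyIpc(ipc, 1);
    free(logging);
    return 0;
}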