ipc: align icbmsg to no-multithreading
Modify ICBMSG so that it can be used in no-multithreading
applications.

Signed-off-by: Magdalena Pastula <magdalena.pastula@nordicsemi.no>
magp-nordic authored and carlescufi committed Sep 12, 2024
1 parent 12e8bc7 commit 5de1c09
Showing 1 changed file with 53 additions and 0 deletions.
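The change applies one pattern throughout the file: kernel synchronization objects and the bound-processing work item are compiled in only under CONFIG_MULTITHREADING, and call sites that defer work to a workqueue in the multithreaded build invoke the handler directly otherwise. A minimal sketch of that pattern, with simplified names and bodies (an illustration, not the driver code itself):

/*
 * Sketch of the guard pattern used in this commit. Names and the
 * submit call are simplified; only the structure is meaningful here.
 */
#include <zephyr/kernel.h>

struct backend_data_sketch {
#ifdef CONFIG_MULTITHREADING
	struct k_mutex mutex;        /* Protects shared bookkeeping. */
	struct k_work ep_bound_work; /* Deferred bound processing. */
	struct k_sem block_wait_sem; /* Waiting for free TX blocks. */
#endif
	/* ...fields used in both configurations... */
};

#ifdef CONFIG_MULTITHREADING
static void ept_bound_process(struct k_work *item)
{
	struct backend_data_sketch *dev_data =
		CONTAINER_OF(item, struct backend_data_sketch, ep_bound_work);

	ARG_UNUSED(dev_data);
	/* ...match waiting bound messages under dev_data->mutex... */
}
#else
static void ept_bound_process(struct backend_data_sketch *dev_data)
{
	ARG_UNUSED(dev_data);
	/* Single context of execution: no locking required. */
}
#endif

static void schedule_ept_bound_process(struct backend_data_sketch *dev_data)
{
#ifdef CONFIG_MULTITHREADING
	k_work_submit(&dev_data->ep_bound_work); /* Defer to a workqueue. */
#else
	ept_bound_process(dev_data);             /* Run inline, to completion. */
#endif
}

The same guard wraps each k_mutex_lock()/k_mutex_unlock() pair and the block_wait_sem retry loop in alloc_tx_buffer(), so the single-threaded build makes a single sys_bitarray_alloc() attempt and can return -ENOSPC to its caller instead of waiting for blocks to be released.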
subsys/ipc/ipc_service/backends/ipc_icbmsg.c (53 additions, 0 deletions)
@@ -165,11 +165,13 @@ struct ept_data {
struct backend_data {
const struct icbmsg_config *conf;/* Backend instance config. */
struct icmsg_data_t control_data;/* ICMsg data. */
#ifdef CONFIG_MULTITHREADING
struct k_mutex mutex; /* Mutex to protect: ICMsg send call and
* waiting_bound field.
*/
struct k_work ep_bound_work; /* Work item for bounding processing. */
struct k_sem block_wait_sem; /* Semaphore for waiting for free blocks. */
#endif
struct ept_data ept[NUM_EPT]; /* Array of registered endpoints. */
uint8_t ept_map[NUM_EPT]; /* Array that maps endpoint address to index. */
uint16_t waiting_bound[NUM_EPT];/* The bound messages waiting to be registered. */
@@ -200,8 +202,10 @@ struct control_message {

BUILD_ASSERT(NUM_EPT <= EPT_ADDR_INVALID, "Too many endpoints");

#ifdef CONFIG_MULTITHREADING
/* Work queue for bounding processing. */
static struct k_work_q ep_bound_work_q;
#endif

/**
* Calculate pointer to block from its index and channel configuration (RX or TX).
@@ -318,12 +322,15 @@ static int alloc_tx_buffer(struct backend_data *dev_data, uint32_t *size,
size_t total_size = *size + BLOCK_HEADER_SIZE;
size_t num_blocks = DIV_ROUND_UP(total_size, conf->tx.block_size);
struct block_content *block;
#ifdef CONFIG_MULTITHREADING
bool sem_taken = false;
#endif
size_t tx_block_index;
size_t next_bit;
int prev_bit_val;
int r;

#ifdef CONFIG_MULTITHREADING
do {
/* Try to allocate specified number of blocks. */
r = sys_bitarray_alloc(conf->tx_usage_bitmap, num_blocks,
@@ -349,6 +356,10 @@
if (sem_taken) {
k_sem_give(&dev_data->block_wait_sem);
}
#else
/* Try to allocate specified number of blocks. */
r = sys_bitarray_alloc(conf->tx_usage_bitmap, num_blocks, &tx_block_index);
#endif

if (r < 0) {
if (r != -ENOSPC && r != -EAGAIN) {
@@ -448,8 +459,10 @@ static int release_tx_blocks(struct backend_data *dev_data, size_t tx_block_inde
return r;
}

#ifdef CONFIG_MULTITHREADING
/* Wake up all waiting threads. */
k_sem_give(&dev_data->block_wait_sem);
#endif
}

return tx_block_index;
@@ -497,10 +510,14 @@ static int send_control_message(struct backend_data *dev_data, enum msg_type msg
};
int r;

#ifdef CONFIG_MULTITHREADING
k_mutex_lock(&dev_data->mutex, K_FOREVER);
#endif
r = icmsg_send(&conf->control_config, &dev_data->control_data, &message,
sizeof(message));
#ifdef CONFIG_MULTITHREADING
k_mutex_unlock(&dev_data->mutex);
#endif
if (r < sizeof(message)) {
LOG_ERR("Cannot send over ICMsg, err %d", r);
}
@@ -676,21 +693,29 @@ static int send_bound_message(struct backend_data *dev_data, struct ept_data *ep
return r;
}

#ifdef CONFIG_MULTITHREADING
/**
* Put endpoint bound processing into system workqueue.
*/
static void schedule_ept_bound_process(struct backend_data *dev_data)
{
k_work_submit_to_queue(&ep_bound_work_q, &dev_data->ep_bound_work);
}
#endif

/**
* Work handler that is responsible to start bounding when ICMsg is bound.
*/
#ifdef CONFIG_MULTITHREADING
static void ept_bound_process(struct k_work *item)
#else
static void ept_bound_process(struct backend_data *dev_data)
#endif
{
#ifdef CONFIG_MULTITHREADING
struct backend_data *dev_data = CONTAINER_OF(item, struct backend_data,
ep_bound_work);
#endif
struct ept_data *ept = NULL;
size_t i;
int r = 0;
@@ -717,13 +742,19 @@ static void ept_bound_process(struct k_work *item)
}
} else {
/* Walk over all waiting bound messages and match to local endpoints. */
#ifdef CONFIG_MULTITHREADING
k_mutex_lock(&dev_data->mutex, K_FOREVER);
#endif
for (i = 0; i < NUM_EPT; i++) {
if (dev_data->waiting_bound[i] != WAITING_BOUND_MSG_EMPTY) {
#ifdef CONFIG_MULTITHREADING
k_mutex_unlock(&dev_data->mutex);
#endif
r = match_bound_msg(dev_data,
dev_data->waiting_bound[i], i);
#ifdef CONFIG_MULTITHREADING
k_mutex_lock(&dev_data->mutex, K_FOREVER);
#endif
if (r != 0) {
dev_data->waiting_bound[i] =
WAITING_BOUND_MSG_EMPTY;
@@ -733,7 +764,9 @@ static void ept_bound_process(struct k_work *item)
}
}
}
#ifdef CONFIG_MULTITHREADING
k_mutex_unlock(&dev_data->mutex);
#endif
}
}

@@ -853,12 +886,20 @@ static int received_bound(struct backend_data *dev_data, size_t rx_block_index,
}

/* Put message to waiting array. */
#ifdef CONFIG_MULTITHREADING
k_mutex_lock(&dev_data->mutex, K_FOREVER);
#endif
dev_data->waiting_bound[ept_addr] = rx_block_index;
#ifdef CONFIG_MULTITHREADING
k_mutex_unlock(&dev_data->mutex);
#endif

#ifdef CONFIG_MULTITHREADING
/* Schedule processing the message. */
schedule_ept_bound_process(dev_data);
#else
ept_bound_process(dev_data);
#endif

return 0;
}
@@ -934,7 +975,11 @@ static void control_bound(void *priv)

/* Set flag that ICMsg is bounded and now, endpoint bounding may start. */
atomic_or(&dev_data->flags, CONTROL_BOUNDED);
#ifdef CONFIG_MULTITHREADING
schedule_ept_bound_process(dev_data);
#else
ept_bound_process(dev_data);
#endif
}

/**
@@ -1031,8 +1076,12 @@ static int register_ept(const struct device *instance, void **token,
/* Keep endpoint address in token. */
*token = ept;

#ifdef CONFIG_MULTITHREADING
/* Rest of the bounding will be done in the system workqueue. */
schedule_ept_bound_process(dev_data);
#else
ept_bound_process(dev_data);
#endif

return r;
}
@@ -1131,6 +1180,7 @@ static int backend_init(const struct device *instance)
{
const struct icbmsg_config *conf = instance->config;
struct backend_data *dev_data = instance->data;
#ifdef CONFIG_MULTITHREADING
static K_THREAD_STACK_DEFINE(ep_bound_work_q_stack, EP_BOUND_WORK_Q_STACK_SIZE);
static bool is_work_q_started;

@@ -1142,12 +1192,15 @@

is_work_q_started = true;
}
#endif

dev_data->conf = conf;
dev_data->is_initiator = (conf->rx.blocks_ptr < conf->tx.blocks_ptr);
#ifdef CONFIG_MULTITHREADING
k_mutex_init(&dev_data->mutex);
k_work_init(&dev_data->ep_bound_work, ept_bound_process);
k_sem_init(&dev_data->block_wait_sem, 0, 1);
#endif
memset(&dev_data->waiting_bound, 0xFF, sizeof(dev_data->waiting_bound));
memset(&dev_data->ept_map, EPT_ADDR_INVALID, sizeof(dev_data->ept_map));
return 0;
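
For reference, a hypothetical prj.conf fragment for an application exercising the new path (exact options depend on the board and devicetree; the backend symbol is normally enabled from the devicetree node with the zephyr,ipc-icbmsg compatible, so listing it here is illustrative):

# Hypothetical configuration sketch: no-multithreading build using ICBMSG.
CONFIG_MULTITHREADING=n
CONFIG_IPC_SERVICE=y
CONFIG_IPC_SERVICE_BACKEND_ICBMSG=y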