• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

mendersoftware / mender-mcu / 2099574738

14 Oct 2025 08:53AM UTC coverage: 60.837% (+3.4%) from 57.388%
2099574738

push

gitlab-ci

web-flow
Merge pull request #214 from danielskinstad/device-tiers

feat: send tier in authentication request

19 of 20 new or added lines in 3 files covered. (95.0%)

21 existing lines in 4 files now uncovered.

2456 of 4037 relevant lines covered (60.84%)

68.91 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

52.63
/src/platform/os/zephyr/os.c
1
/**
2
 * @file      os.c
3
 * @brief     Mender OS interface for Zephyr platform
4
 *
5
 * Copyright joelguittet and mender-mcu-client contributors
6
 * Copyright Northern.tech AS
7
 *
8
 * Licensed under the Apache License, Version 2.0 (the "License");
9
 * you may not use this file except in compliance with the License.
10
 * You may obtain a copy of the License at
11
 *
12
 *     http://www.apache.org/licenses/LICENSE-2.0
13
 *
14
 * Unless required by applicable law or agreed to in writing, software
15
 * distributed under the License is distributed on an "AS IS" BASIS,
16
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
 * See the License for the specific language governing permissions and
18
 * limitations under the License.
19
 */
20

21
#include <zephyr/kernel.h>
22
#include <zephyr/sys/reboot.h> /* sys_reboot() */
23
#include "alloc.h"
24
#include "log.h"
25
#include "os.h"
26
#include "utils.h"
27

28
#ifdef CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE
29
/**
30
 * @brief Default work queue stack size (kB)
31
 */
32
#ifndef CONFIG_MENDER_SCHEDULER_WORK_QUEUE_STACK_SIZE
33
#define CONFIG_MENDER_SCHEDULER_WORK_QUEUE_STACK_SIZE (12)
34
#endif /* CONFIG_MENDER_SCHEDULER_WORK_QUEUE_STACK_SIZE */
35

36
/**
37
 * @brief Default work queue priority
38
 */
39
#ifndef CONFIG_MENDER_SCHEDULER_WORK_QUEUE_PRIORITY
40
#define CONFIG_MENDER_SCHEDULER_WORK_QUEUE_PRIORITY (5)
41
#endif /* CONFIG_MENDER_SCHEDULER_WORK_QUEUE_PRIORITY */
42

43
/**
44
 * @brief Mender scheduler work queue stack
45
 */
46
K_THREAD_STACK_DEFINE(work_queue_stack, CONFIG_MENDER_SCHEDULER_WORK_QUEUE_STACK_SIZE * 1024);
47

48
/**
49
 * @brief Mender work queue
50
 */
51
static struct k_work_q            work_queue;
52
static struct k_work_queue_config work_queue_config;
53
#endif /* CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE */
54

55
/**
 * @brief Work context
 *
 * Wraps a Zephyr delayable work item together with the caller-supplied work
 * parameters. The work handler recovers this context from the embedded
 * delayable item via CONTAINER_OF.
 */
typedef struct mender_platform_work_t {
    mender_os_scheduler_work_params_t params;    /**< Work parameters */
    struct k_work_delayable           delayable; /**< The delayable work item executing the work function */
    bool                              activated; /**< Flag indicating the work is activated */
} mender_platform_work_t;

/**
 * @brief Work queue handler: executes the work function and reschedules the work
 * @param work_item The k_work embedded in a mender_platform_work_t's delayable item
 */
static void mender_os_scheduler_work_handler(struct k_work *work_item);
65

66
mender_err_t
67
mender_os_scheduler_init(void) {
29✔
68
#ifdef CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE
69
    /* Create and start work queue */
70
    work_queue_config.name      = "mender_work_queue";
29✔
71
    work_queue_config.no_yield  = false;
29✔
72
    work_queue_config.essential = true; /* TODO: configurable? */
29✔
73

74
    k_work_queue_init(&work_queue);
29✔
75
    k_work_queue_start(
29✔
76
        &work_queue, work_queue_stack, CONFIG_MENDER_SCHEDULER_WORK_QUEUE_STACK_SIZE * 1024, CONFIG_MENDER_SCHEDULER_WORK_QUEUE_PRIORITY, &work_queue_config);
77
    k_thread_name_set(k_work_queue_thread_get(&work_queue), "mender_work_queue");
29✔
78
#endif /* CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE */
79

80
    return MENDER_OK;
29✔
81
}
82

83
static uint16_t backoff_interval;
84

85
mender_err_t
86
mender_os_scheduler_work_create(mender_os_scheduler_work_params_t *work_params, mender_work_t **work) {
58✔
87
    assert(NULL != work_params);
58✔
88
    assert(NULL != work_params->function);
58✔
89
    assert(NULL != work_params->name);
58✔
90
    assert(NULL != work);
58✔
91

92
    /* Create work context */
93
    mender_platform_work_t *work_context = mender_calloc(1, sizeof(mender_platform_work_t));
58✔
94
    if (NULL == work_context) {
58✔
95
        mender_log_error("Unable to allocate memory");
×
96
        goto FAIL;
×
97
    }
98

99
    /* Copy work parameters */
100
    work_context->params.function             = work_params->function;
58✔
101
    work_context->params.period               = work_params->period;
58✔
102
    work_context->params.backoff.max_interval = work_params->backoff.max_interval;
58✔
103
    work_context->params.backoff.interval     = work_params->backoff.interval;
58✔
104

105
    /* Store the backoff interval so we can reset it */
106
    backoff_interval = work_params->backoff.interval;
58✔
107

108
    if (NULL == (work_context->params.name = mender_utils_strdup(work_params->name))) {
58✔
109
        mender_log_error("Unable to allocate memory");
×
110
        goto FAIL;
×
111
    }
112

113
    k_work_init_delayable(&(work_context->delayable), mender_os_scheduler_work_handler);
58✔
114

115
    /* Return handle to the new work context */
116
    *work = work_context;
58✔
117

118
    return MENDER_OK;
58✔
119

120
FAIL:
×
121

122
    /* Release memory */
123
    if (NULL != work_context) {
×
124
        mender_free(work_context->params.name);
×
125
        mender_free(work_context);
×
126
    }
127

128
    return MENDER_FAIL;
×
129
}
130

131
mender_err_t
132
mender_os_scheduler_work_activate(mender_work_t *work) {
58✔
133
    assert(NULL != work);
58✔
134
    assert(0 != work->params.period);
58✔
135

136
    mender_log_debug("Activating %s every %ju seconds", work->params.name, (uintmax_t)work->params.period);
58✔
137

138
#ifdef CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE
139
    k_work_reschedule_for_queue(&work_queue, &(work->delayable), K_NO_WAIT);
58✔
140
#else
141
    k_work_reschedule(&(work->delayable), K_SECONDS(1));
142
#endif /* CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE */
143

144
    /* Indicate the work has been activated */
145
    work->activated = true;
58✔
146

147
    return MENDER_OK;
58✔
148
}
149

150
/**
 * @brief Trigger an immediate, out-of-period execution of the work
 *
 * Reschedules the delayable item with no delay; the next periodic run is
 * re-armed by the handler itself afterwards.
 *
 * @param work Work context created by mender_os_scheduler_work_create()
 * @return MENDER_OK (cannot fail)
 */
mender_err_t
mender_os_scheduler_work_execute(mender_work_t *work) {
    assert(NULL != work);

#ifdef CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE
    k_work_reschedule_for_queue(&work_queue, &(work->delayable), K_NO_WAIT);
#else
    k_work_reschedule(&(work->delayable), K_NO_WAIT);
#endif /* CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE */

    return MENDER_OK;
}
162

163
mender_err_t
164
mender_os_scheduler_work_set_period(mender_work_t *work, uint32_t period) {
×
165
    assert(NULL != work);
×
166

167
    /* Set timer period */
168
    work->params.period = period;
×
169
    if (work->params.period > 0) {
×
170
#ifdef CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE
171
        k_work_reschedule_for_queue(&work_queue, &(work->delayable), K_SECONDS(period));
×
172
#else
173
        k_work_reschedule(&(work->delayable), K_SECONDS(period));
174
#endif /* CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE */
175
    } else {
176
        k_work_cancel_delayable(&(work->delayable));
×
177
        work->activated = false;
×
178
    }
179

180
    return MENDER_OK;
×
181
}
182

183
mender_err_t
184
mender_os_scheduler_work_deactivate(mender_work_t *work) {
×
185
    assert(NULL != work);
×
186

187
    /* Check if the work was activated */
188
    if (work->activated) {
×
189
        k_work_cancel_delayable(&(work->delayable));
×
190

191
        /* Indicate the work has been deactivated */
192
        work->activated = false;
×
193
    }
194

195
    return MENDER_OK;
×
196
}
197

198
mender_err_t
199
mender_os_scheduler_work_delete(mender_work_t *work) {
×
200
    if (NULL == work) {
×
201
        return MENDER_OK;
×
202
    }
203

204
    mender_free(work->params.name);
×
205
    mender_free(work);
×
206

207
    return MENDER_OK;
×
208
}
209

210
/**
 * @brief Shut the scheduler down
 *
 * With a separate work queue configured, drains the queue: waits for queued
 * items to finish and (with plug == true) refuses new submissions.
 *
 * @return MENDER_OK (cannot fail)
 */
mender_err_t
mender_os_scheduler_exit(void) {
#ifdef CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE
    k_work_queue_drain(&work_queue, true);
#endif /* CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE */
    return MENDER_OK;
}
217

218
static void
219
mender_os_scheduler_work_handler(struct k_work *work_item) {
98✔
220
    assert(NULL != work_item);
98✔
221
    mender_err_t ret;
222

223
    /* Get work context */
224
    struct k_work_delayable *delayable_item = k_work_delayable_from_work(work_item);
98✔
225
    mender_platform_work_t  *work           = CONTAINER_OF(delayable_item, mender_platform_work_t, delayable);
98✔
226
    assert(NULL != work);
98✔
227

228
    if (!work->activated) {
98✔
229
        /* nothing more to do */
230
        return;
×
231
    }
232

233
    uint32_t period = work->params.period;
98✔
234

235
    /* Call work function */
236
    mender_log_debug("Executing %s work", work->params.name);
98✔
237
    if (MENDER_DONE == (ret = work->params.function())) {
98✔
238
        /* Reset the backoff */
239
        work->params.backoff.interval = backoff_interval;
×
240
        /* nothing more to do */
241
        return;
×
242
    }
243
    if (MENDER_OK != ret) {
83✔
244
        if (MENDER_RETRY_ERROR == ret) {
×
245
            mender_log_debug("Retry error detected, retrying with backoff");
×
246
            period                        = work->params.backoff.interval;
×
247
            uint16_t next                 = work->params.backoff.interval * 2;
×
248
            work->params.backoff.interval = (next >= work->params.backoff.max_interval) ? work->params.backoff.max_interval : next;
×
249
        }
250
        mender_log_error("Work %s failed, retrying in %" PRIu32 " seconds", work->params.name, period);
×
251
    } else {
252
        /* Reset the backoff */
253
        work->params.backoff.interval = backoff_interval;
83✔
254
    }
255

256
    /* Reschedule self for the next period */
257
#ifdef CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE
258
    k_work_reschedule_for_queue(&work_queue, delayable_item, K_SECONDS(period));
83✔
259
#else
260
    k_work_reschedule(delayable_item, K_SECONDS(period));
261
#endif /* CONFIG_MENDER_SCHEDULER_SEPARATE_WORK_QUEUE */
262
}
263

264
mender_err_t
265
mender_os_mutex_create(void **handle) {
58✔
266
    assert(NULL != handle);
58✔
267

268
    /* Create mutex */
269
    if (NULL == (*handle = mender_malloc(sizeof(struct k_mutex)))) {
58✔
270
        return MENDER_FAIL;
×
271
    }
272
    if (0 != k_mutex_init((struct k_mutex *)(*handle))) {
58✔
273
        FREE_AND_NULL(*handle);
×
274
        return MENDER_FAIL;
×
275
    }
276

277
    return MENDER_OK;
58✔
278
}
279

280
mender_err_t
281
mender_os_mutex_take(void *handle, int32_t delay_ms) {
307✔
282
    assert(NULL != handle);
307✔
283

284
    /* Take mutex */
285
    if (0 != k_mutex_lock((struct k_mutex *)handle, (delay_ms >= 0) ? K_MSEC(delay_ms) : K_FOREVER)) {
307✔
286
        return MENDER_FAIL;
×
287
    }
288

289
    return MENDER_OK;
307✔
290
}
291

292
mender_err_t
293
mender_os_mutex_give(void *handle) {
292✔
294
    assert(NULL != handle);
292✔
295

296
    /* Give mutex */
297
    if (0 != k_mutex_unlock((struct k_mutex *)handle)) {
292✔
298
        return MENDER_FAIL;
×
299
    }
300

301
    return MENDER_OK;
292✔
302
}
303

304
/**
 * @brief Delete a mutex created by mender_os_mutex_create()
 *
 * Only releases the heap storage; Zephyr's k_mutex has no destroy call.
 * NOTE(review): the caller must ensure no thread is still blocked on or
 * holding the mutex — confirm at call sites.
 *
 * @param handle Mutex handle (may be NULL; mender_free of NULL is a no-op)
 * @return MENDER_OK (cannot fail)
 */
mender_err_t
mender_os_mutex_delete(void *handle) {

    /* Release memory */
    mender_free(handle);

    return MENDER_OK;
}
312

313
/**
 * @brief Reboot the device
 *
 * Performs a warm reboot via Zephyr's sys_reboot(); does not return.
 */
void
mender_os_reboot(void) {
    sys_reboot(SYS_REBOOT_WARM);
}
317

318
/**
 * @brief Put the calling thread to sleep
 *
 * @param period_ms Sleep duration in milliseconds
 */
void
mender_os_sleep(uint32_t period_ms) {
    k_sleep(K_MSEC(period_ms));
}
×
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc