Changes in / [d3b2ffa:1a9174e] in mainline
Files: 1 deleted, 23 edited
Legend: each file below is shown as a unified diff from d3b2ffa to 1a9174e. Lines prefixed with "-" were removed, lines prefixed with "+" were added, unprefixed lines are unmodified context, and "…" marks an omitted span between hunks.
HelenOS.config
 % Support for NS16550 controller (kernel console)
-! [(CONFIG_HID_IN=generic|CONFIG_HID_IN=serial|CONFIG_HID_OUT=generic|CONFIG_HID_OUT=serial)&(PLATFORM=ia32|PLATFORM=amd64)] CONFIG_NS16550_KCON (y/n)
+! [(CONFIG_HID_IN=generic|CONFIG_HID_IN=serial|CONFIG_HID_OUT=generic|CONFIG_HID_OUT=serial)&(PLATFORM=ia32|PLATFORM=amd64)] CONFIG_NS16550_KCON (n/y)

 % Use NS16550 controller as serial input (kernel console)
kernel/generic/include/synch/waitq.h
 extern void _waitq_wakeup_unsafe(waitq_t *, wakeup_mode_t);
 extern void waitq_interrupt_sleep(struct thread *);
+extern void waitq_unsleep(waitq_t *);
 extern int waitq_count_get(waitq_t *);
 extern void waitq_count_set(waitq_t *, int val);
kernel/generic/src/ipc/ipc.c
     errno_t rc;

+restart:
     rc = waitq_sleep_timeout(&box->wq, usec, flags, NULL);
     if (rc != EOK)
…
         list_append(&request->ab_link, &box->dispatched_calls);
     } else {
-        /*
-         * This can happen regularly after ipc_cleanup, or in
-         * response to ipc_poke(). Let the caller sort out the wakeup.
-         */
+        /* This can happen regularly after ipc_cleanup */
         irq_spinlock_unlock(&box->lock, true);
-        return NULL;
+        goto restart;
     }
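For context (this note and sketch are not part of the changeset): the hunk above restores the behaviour where a wakeup that delivers no call, for example one caused by sys_ipc_poke() or by ipc_cleanup(), puts the caller back to sleep via goto restart instead of returning NULL. A minimal userspace analogue of the same re-check-and-sleep-again pattern, written with POSIX threads rather than kernel wait queues:

    /* Analogue only: a consumer that tolerates wakeups carrying no work. */
    #include <pthread.h>
    #include <stddef.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        void *item;             /* NULL means "nothing to hand out" */
    } mailbox_t;

    static void *mailbox_wait(mailbox_t *mb)
    {
        pthread_mutex_lock(&mb->lock);
        /* A bare signal (cf. ipc_poke()) may wake us while item is still
         * NULL; in that case we simply wait again, like the restored
         * "goto restart" above. */
        while (mb->item == NULL)
            pthread_cond_wait(&mb->cond, &mb->lock);
        void *item = mb->item;
        mb->item = NULL;
        pthread_mutex_unlock(&mb->lock);
        return item;
    }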
kernel/generic/src/ipc/sysipc.c
 sys_errno_t sys_ipc_poke(void)
 {
-    waitq_wakeup(&TASK->answerbox.wq, WAKEUP_FIRST);
+    waitq_unsleep(&TASK->answerbox.wq);
     return EOK;
 }
kernel/generic/src/synch/waitq.c
     if (do_wakeup)
         thread_ready(thread);
+}
+
+/** Interrupt the first thread sleeping in the wait queue.
+ *
+ * Note that the caller somehow needs to know that the thread to be interrupted
+ * is sleeping interruptibly.
+ *
+ * @param wq Pointer to wait queue.
+ *
+ */
+void waitq_unsleep(waitq_t *wq)
+{
+    irq_spinlock_lock(&wq->lock, true);
+
+    if (!list_empty(&wq->sleepers)) {
+        thread_t *thread = list_get_instance(list_first(&wq->sleepers),
+            thread_t, wq_link);
+
+        irq_spinlock_lock(&thread->lock, false);
+
+        assert(thread->sleep_interruptible);
+
+        if ((thread->timeout_pending) &&
+            (timeout_unregister(&thread->sleep_timeout)))
+            thread->timeout_pending = false;
+
+        list_remove(&thread->wq_link);
+        thread->saved_context = thread->sleep_interruption_context;
+        thread->sleep_queue = NULL;
+
+        irq_spinlock_unlock(&thread->lock, false);
+        thread_ready(thread);
+    }
+
+    irq_spinlock_unlock(&wq->lock, true);
 }
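For context (not part of the changeset): waitq_unsleep() only touches the first thread on the sleepers list and asserts that it went to sleep interruptibly, so it pairs with an interruptible waitq_sleep_timeout() on the waiting side, which is how sys_ipc_poke() and ipc_wait_for_call() use it elsewhere in this changeset. A condensed, hedged sketch of that pairing (kernel-side code, not compilable on its own; SYNCH_NO_TIMEOUT and SYNCH_FLAGS_INTERRUPTIBLE are assumed to be the usual HelenOS synch constants, and the interruption is assumed to surface as a non-EOK return):

    /* Sleeper side: an interruptible sleep that an unsleep can cut short. */
    static void wait_for_work(waitq_t *wq)
    {
        errno_t rc = waitq_sleep_timeout(wq, SYNCH_NO_TIMEOUT,
            SYNCH_FLAGS_INTERRUPTIBLE, NULL);
        if (rc != EOK) {
            /* Interrupted (e.g. by waitq_unsleep()) or timed out. */
            return;
        }
        /* ... handle the wakeup ... */
    }

    /* Poker side: interrupt at most the first interruptible sleeper. */
    static void poke(waitq_t *wq)
    {
        waitq_unsleep(wq);
    }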
tools/ew.py
         cmdline += ' -nographic'

-    if ((not is_override('nographic')) and not is_override('noserial')):
-        cmdline += ' -serial stdio'
-
     if (is_override('bigmem')):
         cmdline += ' -m 4G'
…
     print("-notablet\tDisable USB tablet (use only relative-position PS/2 mouse instead), if applicable.")
     print("-nographic\tDisable graphical output. Serial port output must be enabled for this to be useful.")
-    print("-noserial\tDisable serial port output in the terminal.")
     print("-bigmem\tSets maximum RAM size to 4GB.")
…
         elif sys.argv[i] == '-bigmem':
             overrides['bigmem'] = True
-        elif sys.argv[i] == '-noserial':
-            overrides['noserial'] = True
         elif sys.argv[i] == '-qemu_path' and i < len(sys.argv) - 1:
             expect_qemu = True
uspace/app/stats/stats.c
 #include <stdio.h>
 #include <task.h>
+#include <thread.h>
 #include <stats.h>
 #include <errno.h>
uspace/app/top/top.c
 #include <stdlib.h>
 #include <task.h>
+#include <thread.h>
 #include <sys/time.h>
 #include <errno.h>
uspace/drv/bus/usb/xhci/endpoint.c
     if (dev->speed >= USB_SPEED_HIGH ||
         ep->transfer_type != USB_TRANSFER_INTERRUPT) {
-
-        // XXX: According to the spec, the interval should be
-        // from [1, 16]. However, in QEMU, we get 0 here
-        // (a QEMU bug?).
-        if (xhci_ep->interval == 0)
-            xhci_ep->interval = 8;
-
         xhci_ep->interval = 1 << (xhci_ep->interval - 1);
     }
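For context (not part of the changeset): the line that stays computes the period as 1 << (interval - 1), i.e. it assumes a bInterval value in [1, 16] as the removed comment notes; the dropped block was a defensive clamp for the value 0 observed under QEMU. A small standalone illustration of that mapping, with the clamp folded back in as an assumption rather than as driver code:

    #include <stdint.h>

    /* Maps a high-speed/SuperSpeed bInterval value (valid range [1, 16]) to
     * its period of 2^(bInterval - 1) service intervals; out-of-range input
     * falls back to 8, mirroring the workaround removed above. */
    static inline uint32_t interval_to_period(uint8_t interval)
    {
        if (interval < 1 || interval > 16)
            interval = 8;
        return UINT32_C(1) << (interval - 1);
    }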
uspace/drv/nic/virtio-net/virtio-net.c
     .driver_ops = &virtio_net_driver_ops
 };
+
+/** Allocate DMA buffers
+ *
+ * @param buffers[in]  Number of buffers to allocate.
+ * @param size[in]     Size of each buffer.
+ * @param write[in]    True if the buffers are writable by the driver, false
+ *                     otherwise.
+ * @param buf[out]     Output array holding virtual addresses of the allocated
+ *                     buffers.
+ * @param buf_p[out]   Output array holding physical addresses of the allocated
+ *                     buffers.
+ *
+ * The buffers can be deallocated by virtio_net_teardown_bufs().
+ *
+ * @return EOK on success or error code.
+ */
+static errno_t virtio_net_setup_bufs(unsigned int buffers, size_t size,
+    bool write, void *buf[], uintptr_t buf_p[])
+{
+    /*
+     * Allocate all buffers at once in one large chunk.
+     */
+    void *virt = AS_AREA_ANY;
+    uintptr_t phys;
+    errno_t rc = dmamem_map_anonymous(buffers * size, 0,
+        write ? AS_AREA_WRITE : AS_AREA_READ, 0, &phys, &virt);
+    if (rc != EOK)
+        return rc;
+
+    ddf_msg(LVL_NOTE, "DMA buffers: %p-%p", virt, virt + buffers * size);
+
+    /*
+     * Calculate addresses of the individual buffers for easy access.
+     */
+    for (unsigned i = 0; i < buffers; i++) {
+        buf[i] = virt + i * size;
+        buf_p[i] = phys + i * size;
+    }
+
+    return EOK;
+}
+
+/** Deallocate DMA buffers
+ *
+ * @param buf[in]  Array holding the virtual addresses of the DMA buffers
+ *                 previously allocated by virtio_net_setup_bufs().
+ */
+static void virtio_net_teardown_bufs(void *buf[])
+{
+    if (buf[0]) {
+        dmamem_unmap_anonymous(buf[0]);
+        buf[0] = NULL;
+    }
+}
+
+/** Create free descriptor list from the unused VIRTIO descriptors
+ *
+ * @param vdev[in]  VIRTIO device for which the free list will be created.
+ * @param num[in]   Index of the virtqueue for which the free list will be
+ *                  created.
+ * @param size[in]  Number of descriptors on the free list. The free list will
+ *                  contain descriptors starting from 0 to \a size - 1.
+ * @param head[out] Variable that will hold the VIRTIO descriptor at the head
+ *                  of the free list.
+ */
+static void virtio_net_create_desc_free_list(virtio_dev_t *vdev, uint16_t num,
+    uint16_t size, uint16_t *head)
+{
+    for (unsigned i = 0; i < size; i++) {
+        virtio_virtq_desc_set(vdev, num, i, 0, 0,
+            VIRTQ_DESC_F_NEXT, (i + 1 == size) ? -1U : i + 1);
+    }
+    *head = 0;
+}
+
+/** Allocate a descriptor from the free list
+ *
+ * @param vdev[in]     VIRTIO device with the free list.
+ * @param num[in]      Index of the virtqueue with free list.
+ * @param head[in,out] Head of the free list.
+ *
+ * @return Allocated descriptor or 0xFFFF if the list is empty.
+ */
+static uint16_t virtio_net_alloc_desc(virtio_dev_t *vdev, uint16_t num,
+    uint16_t *head)
+{
+    virtq_t *q = &vdev->queues[num];
+    fibril_mutex_lock(&q->lock);
+    uint16_t descno = *head;
+    if (descno != (uint16_t) -1U)
+        *head = virtio_virtq_desc_get_next(vdev, num, descno);
+    fibril_mutex_unlock(&q->lock);
+    return descno;
+}
+
+/** Free a descriptor into the free list
+ *
+ * @param vdev[in]     VIRTIO device with the free list.
+ * @param num[in]      Index of the virtqueue with free list.
+ * @param head[in,out] Head of the free list.
+ * @param descno[in]   The freed descriptor.
+ */
+static void virtio_net_free_desc(virtio_dev_t *vdev, uint16_t num,
+    uint16_t *head, uint16_t descno)
+{
+    virtq_t *q = &vdev->queues[num];
+    fibril_mutex_lock(&q->lock);
+    virtio_virtq_desc_set(vdev, num, descno, 0, 0, VIRTQ_DESC_F_NEXT,
+        *head);
+    *head = descno;
+    fibril_mutex_unlock(&q->lock);
+}

 static void virtio_net_irq_handler(ipc_call_t *icall, ddf_dev_t *dev)
…
     while (virtio_virtq_consume_used(vdev, TX_QUEUE_1, &descno, &len)) {
-        virtio_free_desc(vdev, TX_QUEUE_1, &virtio_net->tx_free_head,
-            descno);
+        virtio_net_free_desc(vdev, TX_QUEUE_1,
+            &virtio_net->tx_free_head, descno);
     }
     while (virtio_virtq_consume_used(vdev, CT_QUEUE_1, &descno, &len)) {
-        virtio_free_desc(vdev, CT_QUEUE_1, &virtio_net->ct_free_head,
-            descno);
+        virtio_net_free_desc(vdev, CT_QUEUE_1,
+            &virtio_net->ct_free_head, descno);
     }
 }
…
      * Setup DMA buffers
      */
-    rc = virtio_setup_dma_bufs(RX_BUFFERS, RX_BUF_SIZE, false,
+    rc = virtio_net_setup_bufs(RX_BUFFERS, RX_BUF_SIZE, false,
         virtio_net->rx_buf, virtio_net->rx_buf_p);
     if (rc != EOK)
         goto fail;
-    rc = virtio_setup_dma_bufs(TX_BUFFERS, TX_BUF_SIZE, true,
+    rc = virtio_net_setup_bufs(TX_BUFFERS, TX_BUF_SIZE, true,
         virtio_net->tx_buf, virtio_net->tx_buf_p);
     if (rc != EOK)
         goto fail;
-    rc = virtio_setup_dma_bufs(CT_BUFFERS, CT_BUF_SIZE, true,
+    rc = virtio_net_setup_bufs(CT_BUFFERS, CT_BUF_SIZE, true,
         virtio_net->ct_buf, virtio_net->ct_buf_p);
     if (rc != EOK)
…
      * Put all TX and CT buffers on a free list
      */
-    virtio_create_desc_free_list(vdev, TX_QUEUE_1, TX_BUFFERS,
+    virtio_net_create_desc_free_list(vdev, TX_QUEUE_1, TX_BUFFERS,
         &virtio_net->tx_free_head);
-    virtio_create_desc_free_list(vdev, CT_QUEUE_1, CT_BUFFERS,
+    virtio_net_create_desc_free_list(vdev, CT_QUEUE_1, CT_BUFFERS,
         &virtio_net->ct_free_head);
…
 fail:
-    virtio_teardown_dma_bufs(virtio_net->rx_buf);
-    virtio_teardown_dma_bufs(virtio_net->tx_buf);
-    virtio_teardown_dma_bufs(virtio_net->ct_buf);
+    virtio_net_teardown_bufs(virtio_net->rx_buf);
+    virtio_net_teardown_bufs(virtio_net->tx_buf);
+    virtio_net_teardown_bufs(virtio_net->ct_buf);

     virtio_device_setup_fail(vdev);
…
     virtio_net_t *virtio_net = (virtio_net_t *) nic_get_specific(nic);

-    virtio_teardown_dma_bufs(virtio_net->rx_buf);
-    virtio_teardown_dma_bufs(virtio_net->tx_buf);
-    virtio_teardown_dma_bufs(virtio_net->ct_buf);
+    virtio_net_teardown_bufs(virtio_net->rx_buf);
+    virtio_net_teardown_bufs(virtio_net->tx_buf);
+    virtio_net_teardown_bufs(virtio_net->ct_buf);

     virtio_device_setup_fail(&virtio_net->virtio_dev);
…
     }

-    uint16_t descno = virtio_alloc_desc(vdev, TX_QUEUE_1,
+    uint16_t descno = virtio_net_alloc_desc(vdev, TX_QUEUE_1,
         &virtio_net->tx_free_head);
     if (descno == (uint16_t) -1U) {
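For context (not part of the changeset): the helpers moved into the driver above implement a free list threaded through the descriptors' own next fields; the IRQ-handler and setup hunks already show the consume/free and initialisation sides. A hedged sketch of the remaining piece, the transmit path, pieced together from the names visible in this diff (the virtio-net header handling and the exact flag values are omitted or assumed, so this is not the literal driver code):

    static errno_t tx_one_frame(virtio_net_t *virtio_net, const void *frame,
        size_t size)
    {
        virtio_dev_t *vdev = &virtio_net->virtio_dev;

        /* 1. Take a descriptor off the driver-private TX free list. */
        uint16_t descno = virtio_net_alloc_desc(vdev, TX_QUEUE_1,
            &virtio_net->tx_free_head);
        if (descno == (uint16_t) -1U)
            return EBUSY;       /* every descriptor is still in flight */

        /* 2. Copy the frame into the matching DMA buffer and describe it
         *    (flags/next of 0 assumed for a single device-readable buffer). */
        memcpy(virtio_net->tx_buf[descno], frame, size);
        virtio_virtq_desc_set(vdev, TX_QUEUE_1, descno,
            virtio_net->tx_buf_p[descno], size, 0, 0);

        /* 3. Hand the descriptor to the device. */
        virtio_virtq_produce_available(vdev, TX_QUEUE_1, descno);

        /* 4. Once the device reports it used, the IRQ handler above returns
         *    it to the free list with virtio_net_free_desc(). */
        return EOK;
    }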
uspace/lib/c/generic/async/ports.c
 #include <ipc/irq.h>
 #include <ipc/event.h>
+#include <futex.h>
 #include <fibril.h>
 #include <adt/hash_table.h>
uspace/lib/c/generic/async/server.c
 #include "../private/fibril.h"

-#define DPRINTF(...) ((void) 0)
-
 /** Async framework global futex */
 futex_t async_futex = FUTEX_INITIALIZER;
…
     link_t link;

+    cap_call_handle_t chandle;
     ipc_call_t call;
 } msg_t;
…
     list_t msg_queue;

+    /** Identification of the opening call. */
+    cap_call_handle_t chandle;
+
     /** Call data of the opening call. */
     ipc_call_t call;
…
     void *data;
 } connection_t;
-
-/* Member of notification_t::msg_list. */
-typedef struct {
-    link_t link;
-    ipc_call_t calldata;
-} notification_msg_t;

 /* Notification data */
…
     void *arg;

-    /** List of arrived notifications. */
-    list_t msg_list;
+    /** Data of the most recent notification. */
+    ipc_call_t calldata;
+
+    /**
+     * How many notifications with this `imethod` arrived since it was last
+     * handled. If `count` > 1, `calldata` only holds the data for the most
+     * recent such notification, all the older data being lost.
+     *
+     * `async_spawn_notification_handler()` can be used to increase the
+     * number of notifications that can be processed simultaneously,
+     * reducing the likelihood of losing them when the handler blocks.
+     */
+    long count;
 } notification_t;
…
 static LIST_INITIALIZE(notification_queue);
 static FIBRIL_SEMAPHORE_INITIALIZE(notification_semaphore, 0);
-
-static LIST_INITIALIZE(notification_freelist);
-static long notification_freelist_total = 0;
-static long notification_freelist_used = 0;

 static sysarg_t notification_avail = 0;
…
     client_t *client = async_client_get(fibril_connection->in_task_id, true);
     if (!client) {
-        ipc_answer_0(fibril_connection->call.cap_handle, ENOMEM);
+        ipc_answer_0(fibril_connection->chandle, ENOMEM);
         return 0;
     }
…
      * Call the connection handler function.
      */
-    fibril_connection->handler(fibril_connection->call.cap_handle,
+    fibril_connection->handler(fibril_connection->chandle,
         &fibril_connection->call, fibril_connection->data);
…
         list_remove(&msg->link);
-        ipc_answer_0(msg->call.cap_handle, EHANGUP);
+        ipc_answer_0(msg->chandle, EHANGUP);
         free(msg);
     }
…
  * @param in_task_id    Identification of the incoming connection.
  * @param in_phone_hash Identification of the incoming connection.
- * @param call          Call data of the opening call. If call is NULL,
- *                      the connection was opened by accepting the
- *                      IPC_M_CONNECT_TO_ME call and this function is
- *                      called directly by the server.
+ * @param chandle       Handle of the opening IPC_M_CONNECT_ME_TO call.
+ *                      If chandle is CAP_NIL, the connection was opened by
+ *                      accepting the IPC_M_CONNECT_TO_ME call and this
+ *                      function is called directly by the server.
+ * @param call          Call data of the opening call.
  * @param handler       Connection handler.
  * @param data          Client argument to pass to the connection handler.
…
  */
 static fid_t async_new_connection(task_id_t in_task_id, sysarg_t in_phone_hash,
-    ipc_call_t *call, async_port_handler_t handler, void *data)
+    cap_call_handle_t chandle, ipc_call_t *call, async_port_handler_t handler,
+    void *data)
 {
     connection_t *conn = malloc(sizeof(*conn));
     if (!conn) {
-        if (call)
-            ipc_answer_0(call->cap_handle, ENOMEM);
+        if (chandle != CAP_NIL)
+            ipc_answer_0(chandle, ENOMEM);

         return (uintptr_t) NULL;
…
     conn->in_phone_hash = in_phone_hash;
     list_initialize(&conn->msg_queue);
+    conn->chandle = chandle;
     conn->close_chandle = CAP_NIL;
     conn->handler = handler;
…
     if (call)
         conn->call = *call;
-    else
-        conn->call.cap_handle = CAP_NIL;

     /* We will activate the fibril ASAP */
…
         free(conn);

-        if (call)
-            ipc_answer_0(call->cap_handle, ENOMEM);
+        if (chandle != CAP_NIL)
+            ipc_answer_0(chandle, ENOMEM);

         return (uintptr_t) NULL;
…
     sysarg_t phone_hash = IPC_GET_ARG5(answer);
     fid_t fid = async_new_connection(answer.in_task_id, phone_hash,
-        NULL, handler, data);
+        CAP_NIL, NULL, handler, data);
     if (fid == (uintptr_t) NULL)
         return ENOMEM;
…
  * timeouts are unregistered.
  *
- * @param call Data of the incoming call.
+ * @param chandle Handle of the incoming call.
+ * @param call    Data of the incoming call.
  *
  * @return False if the call doesn't match any connection.
…
  *
  */
-static bool route_call(ipc_call_t *call)
+static bool route_call(cap_call_handle_t chandle, ipc_call_t *call)
 {
     assert(call);
…
     }

+    msg->chandle = chandle;
     msg->call = *call;
     list_append(&msg->link, &conn->msg_queue);

     if (IPC_GET_IMETHOD(*call) == IPC_M_PHONE_HUNGUP)
-        conn->close_chandle = call->cap_handle;
+        conn->close_chandle = chandle;

     /* If the connection fibril is waiting for an event, activate it */
…
         notification_t *notification = list_get_instance(
             list_first(&notification_queue), notification_t, qlink);
+        list_remove(&notification->qlink);

         async_notification_handler_t handler = notification->handler;
         void *arg = notification->arg;
-
-        notification_msg_t *m = list_pop(&notification->msg_list,
-            notification_msg_t, link);
-        assert(m);
-        ipc_call_t calldata = m->calldata;
-
-        notification_freelist_used--;
-
-        if (notification_freelist_total > 64 &&
-            notification_freelist_total > 2 * notification_freelist_used) {
-            /* Going to free the structure if we have too much. */
-            notification_freelist_total--;
-        } else {
-            /* Otherwise add to freelist. */
-            list_append(&m->link, &notification_freelist);
-            m = NULL;
-        }
-
-        if (list_empty(&notification->msg_list))
-            list_remove(&notification->qlink);
+        ipc_call_t calldata = notification->calldata;
+        long count = notification->count;
+
+        notification->count = 0;

         futex_unlock(&notification_futex);
+
+        // FIXME: Pass count to the handler. It might be important.
+        (void) count;

         if (handler)
             handler(&calldata, arg);
-
-        free(m);

     }
…
     futex_lock(&notification_futex);

-    notification_msg_t *m = list_pop(&notification_freelist,
-        notification_msg_t, link);
-
-    if (!m) {
-        futex_unlock(&notification_futex);
-        m = malloc(sizeof(notification_msg_t));
-        if (!m) {
-            DPRINTF("Out of memory.\n");
-            abort();
-        }
-
-        futex_lock(&notification_futex);
-        notification_freelist_total++;
-    }
-
     ht_link_t *link = hash_table_find(&notification_hash_table,
         &IPC_GET_IMETHOD(*call));
…
         /* Invalid notification. */
         // TODO: Make sure this can't happen and turn it into assert.
-        notification_freelist_total--;
         futex_unlock(&notification_futex);
-        free(m);
         return;
     }
…
         hash_table_get_inst(link, notification_t, htlink);

-    notification_freelist_used++;
-    m->calldata = *call;
-    list_append(&m->link, &notification->msg_list);
-
-    if (!link_in_use(&notification->qlink))
-        list_append(&notification->qlink, &notification_queue);
-
+    notification->count++;
+    notification->calldata = *call;
+
+    if (link_in_use(&notification->qlink)) {
+        /* Notification already queued. */
+        futex_unlock(&notification_futex);
+        return;
+    }
+
+    list_append(&notification->qlink, &notification_queue);
     futex_unlock(&notification_futex);
…
     notification->handler = handler;
     notification->arg = arg;
-
-    list_initialize(&notification->msg_list);

     fid_t fib = 0;
…
     list_remove(&msg->link);

-    cap_call_handle_t chandle = msg->call.cap_handle;
+    cap_call_handle_t chandle = msg->chandle;
     *call = msg->call;
     free(msg);
…
  * Otherwise the call is routed to its connection fibril.
  *
- * @param call Data of the incoming call.
- *
- */
-static void handle_call(ipc_call_t *call)
+ * @param chandle Handle of the incoming call.
+ * @param call    Data of the incoming call.
+ *
+ */
+static void handle_call(cap_call_handle_t chandle, ipc_call_t *call)
 {
     assert(call);
…
         return;

-    if (call->cap_handle == CAP_NIL) {
+    if (chandle == CAP_NIL) {
         if (call->flags & IPC_CALL_NOTIF) {
             /* Kernel notification */
…
             async_get_port_handler(iface, 0, &data);

-        async_new_connection(call->in_task_id, in_phone_hash, call,
-            handler, data);
+        async_new_connection(call->in_task_id, in_phone_hash, chandle,
+            call, handler, data);
         return;
     }

     /* Try to route the call through the connection hash table */
-    if (route_call(call))
+    if (route_call(chandle, call))
         return;

     /* Unknown call from unknown phone - hang it up */
-    ipc_answer_0(call->cap_handle, EHANGUP);
+    ipc_answer_0(chandle, EHANGUP);
…
         assert(rc == EOK);
-        handle_call(&call);
+        handle_call(call.cap_handle, &call);
…
 }

-__noreturn void async_manager(void)
+_Noreturn void async_manager(void)
 {
     futex_lock(&async_futex);
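For context (not part of the changeset): with the scheme documented in the new notification_t comment, several kernel notifications with the same imethod can be merged into a single delivery whose calldata describes only the most recent one (and, per the FIXME, the count is not yet passed on to the handler). A hedged sketch of a handler written to tolerate that, using the handler shape visible above; the device-specific names are hypothetical:

    /* Hypothetical driver-side handler: treat each delivery as "at least one
     * interrupt happened" and drain all completed work from the device,
     * instead of assuming one call per interrupt. */
    static void my_nic_irq_handler(ipc_call_t *call, void *arg)
    {
        my_nic_t *nic = arg;

        /* The call data only reflects the latest merged notification. */
        (void) call;

        while (my_nic_harvest_completions(nic) > 0) {
            /* Keep harvesting until the hardware reports nothing pending. */
        }
    }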
uspace/lib/c/generic/fibril_synch.c
 #include <stdlib.h>
 #include <stdio.h>
-#include <io/kio.h>
-
 #include "private/async.h"
 #include "private/fibril.h"
-
-static fibril_local bool deadlocked = false;

 static void optimize_execution_power(void)
…
 {
     fibril_t *f = (fibril_t *) fibril_get_id();
-
-    if (deadlocked) {
-        kio_printf("Deadlock detected while printing deadlock. Aborting.\n");
-        abort();
-    }
-    deadlocked = true;

     printf("Deadlock detected.\n");
uspace/lib/c/generic/ubsan.c
 #endif
 void __ubsan_handle_nonnull_return(struct nonnull_return_data *data);
-void __ubsan_handle_builtin_unreachable(struct unreachable_data *data);

 static void print_loc(const char *func, struct source_location *loc)
…
     f += sizeof(func_prefix);

-    PRINTF(" #######Undefined behavior %s at %s:%" PRIu32 " col %" PRIu32 "\n",
+    PRINTF("Undefined behavior %s at %s:%" PRIu32 " col %" PRIu32 "\n",
         f, loc->file_name, loc->line, loc->column);
 }
…
 {
     print_loc(__func__, &data->loc);
-    PRINTF("Type: %s, alignment: %lu, type_check_kind: %hhu\n",
-        data->type->type_name, data->alignment, data->type_check_kind);
     ubsan_panic();
 }
…
     ubsan_panic();
 }
-
-void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
-{
-    print_loc(__func__, &data->loc);
-    ubsan_panic();
-}
uspace/lib/c/include/async.h
 #include <abi/cap.h>

-#include <_bits/__noreturn.h>
-
 typedef sysarg_t aid_t;
 typedef sysarg_t port_id_t;
…
 typedef struct async_exch async_exch_t;

-extern __noreturn void async_manager(void);
+extern _Noreturn void async_manager(void);

 #define async_get_call(data) \
uspace/lib/c/include/futex.h
     atomic_t val;
 #ifdef CONFIG_DEBUG_FUTEX
-    void *owner;
+    _Atomic void *owner;
 #endif
 } futex_t;
uspace/lib/c/include/setjmp.h
 #include <libarch/fibril_context.h>
-#include <_bits/__noreturn.h>

 typedef context_t jmp_buf[1];

 extern int __setjmp(jmp_buf) __attribute__((returns_twice));
-extern __noreturn void __longjmp(jmp_buf, int);
+extern _Noreturn void __longjmp(jmp_buf, int);

 #define setjmp __setjmp
-extern __noreturn void longjmp(jmp_buf, int);
+extern _Noreturn void longjmp(jmp_buf, int);

 #endif
uspace/lib/drv/generic/remote_usb.c
         return EBADMEM;

-    sysarg_t address, depth, speed, handle, iface;
+    usb_device_desc_t tmp_desc;

     const errno_t ret = async_req_1_5(exch, DEV_IFACE_ID(USB_DEV_IFACE),
-        IPC_M_USB_GET_MY_DESCRIPTION, &address, &depth, &speed, &handle,
-        &iface);
-    if (ret == EOK && desc) {
-        *desc = (usb_device_desc_t) {
-            .address = address,
-            .depth = depth,
-            .speed = speed,
-            .handle = handle,
-            .iface = iface,
-        };
-    }
-
+        IPC_M_USB_GET_MY_DESCRIPTION,
+        (sysarg_t *) &tmp_desc.address,
+        (sysarg_t *) &tmp_desc.depth,
+        (sysarg_t *) &tmp_desc.speed,
+        &tmp_desc.handle,
+        (sysarg_t *) &tmp_desc.iface);
+    if (ret == EOK && desc)
+        *desc = tmp_desc;
     return ret;
 }
uspace/lib/softfloat/common.c
     int j;
     for (j = 0; j < 32; j += 8) {
-        if (i & (0xFFu << (24 - j))) {
+        if (i & (0xFF << (24 - j))) {
             return (j + count_zeroes8(i >> (24 - j)));
         }
uspace/lib/usb/include/usb/request.h
 #define USB_SETUP_PACKET_SIZE 8

+/** Device request setup packet.
+ * The setup packet describes the request.
+ */
+typedef struct {
+    /** Request type.
+     * The type combines transfer direction, request type and
+     * intended recipient.
+     */
+    uint8_t request_type;
 #define SETUP_REQUEST_TYPE_DEVICE_TO_HOST (1 << 7)
 #define SETUP_REQUEST_TYPE_HOST_TO_DEVICE (0 << 7)
…
     (uint8_t)(((type & 0x3) << 5) | (recipient & 0x1f))

-/** Device request setup packet.
- * The setup packet describes the request.
- */
-typedef union {
-    struct __attribute__((packed)) {
-        /** Request type.
-         * The type combines transfer direction, request type and
-         * intended recipient.
-         */
-        uint8_t request_type;
-
-        /** Request identification. */
-        uint8_t request;
-        /** Main parameter to the request. */
-        union __attribute__((packed)) {
-            uint16_t value;
-            /* FIXME: add #ifdefs according to host endianness */
-            struct __attribute__((packed)) {
-                uint8_t value_low;
-                uint8_t value_high;
-            };
+    /** Request identification. */
+    uint8_t request;
+    /** Main parameter to the request. */
+    union __attribute__((packed)) {
+        uint16_t value;
+        /* FIXME: add #ifdefs according to host endianness */
+        struct __attribute__((packed)) {
+            uint8_t value_low;
+            uint8_t value_high;
         };
-        /** Auxiliary parameter to the request.
-         * Typically, it is offset to something.
-         */
-        uint16_t index;
-        /** Length of extra data. */
-        uint16_t length;
     };
-    uint64_t raw;
+    /** Auxiliary parameter to the request.
+     * Typically, it is offset to something.
+     */
+    uint16_t index;
+    /** Length of extra data. */
+    uint16_t length;
 } __attribute__((packed)) usb_device_request_setup_packet_t;
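For context (not part of the changeset): the struct above is the standard 8-byte USB setup packet, and the usb2_bus.c hunk below passes exactly such a packet to the host controller as a single 64-bit value. A small illustration of filling it in for a SET_ADDRESS request; the request code 5 comes from the USB 2.0 specification (section 9.4.6), not from this diff:

    static usb_device_request_setup_packet_t setup_set_address(uint16_t address)
    {
        usb_device_request_setup_packet_t packet = {
            /* Host-to-device, standard request, recipient: device. */
            .request_type = SETUP_REQUEST_TYPE_HOST_TO_DEVICE,
            .request = 5,          /* bRequest: SET_ADDRESS */
            .value = address,      /* wValue: the new address */
            .index = 0,            /* wIndex: unused */
            .length = 0,           /* wLength: no data stage */
        };
        return packet;
    }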
uspace/lib/usbhost/src/usb2_bus.c
     usb_log_debug("Device(%d): Setting USB address.", address);
     err = bus_device_send_batch_sync(dev, usb2_default_target, USB_DIRECTION_OUT,
-        NULL, 0, set_address.raw, "set address", NULL);
+        NULL, 0, *(uint64_t *)&set_address, "set address", NULL);
     if (err) {
         usb_log_error("Device(%d): Failed to set new address: %s.",
uspace/lib/virtio/virtio-pci.h
 } virtio_dev_t;

-extern errno_t virtio_setup_dma_bufs(unsigned int, size_t, bool, void *[],
-    uintptr_t []);
-extern void virtio_teardown_dma_bufs(void *[]);
-
 extern void virtio_virtq_desc_set(virtio_dev_t *vdev, uint16_t, uint16_t,
     uint64_t, uint32_t, uint16_t, uint16_t);
…
     uint16_t);

-extern void virtio_create_desc_free_list(virtio_dev_t *, uint16_t, uint16_t,
-    uint16_t *);
-extern uint16_t virtio_alloc_desc(virtio_dev_t *, uint16_t, uint16_t *);
-extern void virtio_free_desc(virtio_dev_t *, uint16_t, uint16_t *, uint16_t);
-
 extern void virtio_virtq_produce_available(virtio_dev_t *, uint16_t, uint16_t);
 extern bool virtio_virtq_consume_used(virtio_dev_t *, uint16_t, uint16_t *,
uspace/lib/virtio/virtio.c
 #include <libarch/barrier.h>

-/** Allocate DMA buffers
- *
- * @param buffers[in]  Number of buffers to allocate.
- * @param size[in]     Size of each buffer.
- * @param write[in]    True if the buffers are writable by the driver, false
- *                     otherwise.
- * @param buf[out]     Output array holding virtual addresses of the allocated
- *                     buffers.
- * @param buf_p[out]   Output array holding physical addresses of the allocated
- *                     buffers.
- *
- * The buffers can be deallocated by virtio_net_teardown_bufs().
- *
- * @return EOK on success or error code.
- */
-errno_t virtio_setup_dma_bufs(unsigned int buffers, size_t size,
-    bool write, void *buf[], uintptr_t buf_p[])
-{
-    /*
-     * Allocate all buffers at once in one large chunk.
-     */
-    void *virt = AS_AREA_ANY;
-    uintptr_t phys;
-    errno_t rc = dmamem_map_anonymous(buffers * size, 0,
-        write ? AS_AREA_WRITE : AS_AREA_READ, 0, &phys, &virt);
-    if (rc != EOK)
-        return rc;
-
-    ddf_msg(LVL_NOTE, "DMA buffers: %p-%p", virt, virt + buffers * size);
-
-    /*
-     * Calculate addresses of the individual buffers for easy access.
-     */
-    for (unsigned i = 0; i < buffers; i++) {
-        buf[i] = virt + i * size;
-        buf_p[i] = phys + i * size;
-    }
-
-    return EOK;
-}
-
-/** Deallocate DMA buffers
- *
- * @param buf[in]  Array holding the virtual addresses of the DMA buffers
- *                 previously allocated by virtio_net_setup_bufs().
- */
-extern void virtio_teardown_dma_bufs(void *buf[])
-{
-    if (buf[0]) {
-        dmamem_unmap_anonymous(buf[0]);
-        buf[0] = NULL;
-    }
-}
-
 void virtio_virtq_desc_set(virtio_dev_t *vdev, uint16_t num, uint16_t descno,
     uint64_t addr, uint32_t len, uint16_t flags, uint16_t next)
…
     return pio_read_le16(&d->next);
 }
-
-/** Create free descriptor list from the unused VIRTIO descriptors
- *
- * @param vdev[in]  VIRTIO device for which the free list will be created.
- * @param num[in]   Index of the virtqueue for which the free list will be
- *                  created.
- * @param size[in]  Number of descriptors on the free list. The free list will
- *                  contain descriptors starting from 0 to \a size - 1.
- * @param head[out] Variable that will hold the VIRTIO descriptor at the head
- *                  of the free list.
- */
-void virtio_create_desc_free_list(virtio_dev_t *vdev, uint16_t num,
-    uint16_t size, uint16_t *head)
-{
-    for (unsigned i = 0; i < size; i++) {
-        virtio_virtq_desc_set(vdev, num, i, 0, 0,
-            VIRTQ_DESC_F_NEXT, (i + 1 == size) ? -1U : i + 1);
-    }
-    *head = 0;
-}
-
-/** Allocate a descriptor from the free list
- *
- * @param vdev[in]     VIRTIO device with the free list.
- * @param num[in]      Index of the virtqueue with free list.
- * @param head[in,out] Head of the free list.
- *
- * @return Allocated descriptor or 0xFFFF if the list is empty.
- */
-uint16_t virtio_alloc_desc(virtio_dev_t *vdev, uint16_t num, uint16_t *head)
-{
-    virtq_t *q = &vdev->queues[num];
-    fibril_mutex_lock(&q->lock);
-    uint16_t descno = *head;
-    if (descno != (uint16_t) -1U)
-        *head = virtio_virtq_desc_get_next(vdev, num, descno);
-    fibril_mutex_unlock(&q->lock);
-    return descno;
-}
-
-/** Free a descriptor into the free list
- *
- * @param vdev[in]     VIRTIO device with the free list.
- * @param num[in]      Index of the virtqueue with free list.
- * @param head[in,out] Head of the free list.
- * @param descno[in]   The freed descriptor.
- */
-void virtio_free_desc(virtio_dev_t *vdev, uint16_t num, uint16_t *head,
-    uint16_t descno)
-{
-    virtq_t *q = &vdev->queues[num];
-    fibril_mutex_lock(&q->lock);
-    virtio_virtq_desc_set(vdev, num, descno, 0, 0, VIRTQ_DESC_F_NEXT,
-        *head);
-    *head = descno;
-    fibril_mutex_unlock(&q->lock);
-}
-

 void virtio_virtq_produce_available(virtio_dev_t *vdev, uint16_t num,