The Linux kernel's new Contiguous Memory Allocator (CMA): avoiding large static memory reservations



When working with ARM and other embedded Linux systems, a recurring headache is that the GPU, camera, HDMI and similar devices all need large amounts of contiguous memory reserved for them. This memory sits idle most of the time, yet the conventional approach is to carve it out up front anyway. Marek Szyprowski and Michal Nazarewicz have now implemented a new Contiguous Memory Allocator (CMA). With this mechanism nothing has to be set aside permanently: the memory remains usable by the rest of the system and is only handed to the camera, HDMI and other devices when they actually need it. The rest of this article walks through the basic code flow.

Declaring contiguous memory

During kernel boot, arm_memblock_init() in arch/arm/mm/init.c calls dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

This function lives in drivers/base/dma-contiguous.c:

/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or
 * bootmem) has been activated and all other subsystems have already
 * allocated/reserved memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
        unsigned long selected_size = 0;

        pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

        if (size_cmdline != -1) {
                selected_size = size_cmdline;
        } else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
                selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
                selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
                selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
                selected_size = max(size_bytes, cma_early_percent_memory());
#endif
        }

        if (selected_size) {
                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                         selected_size / SZ_1M);

                dma_declare_contiguous(NULL, selected_size, 0, limit);
        }
};
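A note on size_cmdline in the code above: it is filled in from the cma= kernel command-line option. A minimal sketch of how that parameter is parsed in the same file (reconstructed from the dma-contiguous.c of this era; exact details may differ between kernel versions):

static unsigned long size_cmdline = -1;

/* e.g. booting with cma=64M overrides the Kconfig default described below */
static int __init early_cma(char *p)
{
        pr_debug("%s(%s)\n", __func__, p);
        size_cmdline = memparse(p, &p);
        return 0;
}
early_param("cma", early_cma);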

Here, size_bytes is defined as:

static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;

By default CMA_SIZE_MBYTES is 16, i.e. 16 MB, coming from CONFIG_CMA_SIZE_MBYTES=16.

->

int __init dma_declare_contiguous(struct device *dev, unsigned long size,
                                  phys_addr_t base, phys_addr_t limit)
{
        ...
        /* Reserve memory */
        if (base) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        base = -EBUSY;
                        goto err;
                }
        } else {
                /*
                 * Use __memblock_alloc_base() since
                 * memblock_alloc_base() panic()s.
                 */
                phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
                if (!addr) {
                        base = -ENOMEM;
                        goto err;
                } else if (addr + size > ~(unsigned long)0) {
                        memblock_free(addr, size);
                        base = -EINVAL;
                        goto err;
                } else {
                        base = addr;
                }
        }

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
        r->start = base;
        r->size = size;
        r->dev = dev;
        cma_reserved_count++;
        pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
                (unsigned long)base);

        /* Architecture specific contiguous memory fixup. */
        dma_contiguous_early_fixup(base, size);
        return 0;
err:
        pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
        return base;
}

As we can see, the contiguous region is also grabbed early during kernel boot, via __memblock_alloc_base().
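Besides the global area, a platform can also declare a per-device CMA area by calling dma_declare_contiguous() itself from its early reserve hook, while memblock is still the only allocator available. A minimal sketch, assuming a hypothetical board file and camera platform device (my_board_reserve, my_camera_device and the 32 MiB size are illustrative names and values, not taken from the kernel sources):

#include <linux/init.h>
#include <linux/dma-contiguous.h>
#include <linux/platform_device.h>

static struct platform_device my_camera_device = {
        .name   = "my-camera",
        .id     = -1,
};

/* To be called from the machine descriptor's .reserve() callback. */
static void __init my_board_reserve(void)
{
        /* 32 MiB just for the camera; base = 0 lets CMA choose the
         * placement, limit = 0 means "no upper bound".
         */
        if (dma_declare_contiguous(&my_camera_device.dev, 32 << 20, 0, 0))
                pr_warn("CMA: camera area reservation failed\n");
}

Once the area is associated with the struct device (via dev_set_cma_area(), shown below), any dma_alloc_coherent() on that device is satisfied from its private area.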

In addition:

the core_initcall() in drivers/base/dma-contiguous.c causes cma_init_reserved_areas() to be called:

static int __init cma_init_reserved_areas(void)
{
        struct cma_reserved *r = cma_reserved;
        unsigned i = cma_reserved_count;

        pr_debug("%s()\n", __func__);

        for (; i; --i, ++r) {
                struct cma *cma;
                cma = cma_create_area(PFN_DOWN(r->start),
                                      r->size >> PAGE_SHIFT);
                if (!IS_ERR(cma))
                        dev_set_cma_area(r->dev, cma);
        }
        return 0;
}
core_initcall(cma_init_reserved_areas);

cma_create_area() calls cma_activate_area(), which in turn invokes the following for each pageblock in the area:

init_cma_reserved_pageblock(pfn_to_page(base_pfn));

That function marks the pages as MIGRATE_CMA via set_pageblock_migratetype(page, MIGRATE_CMA):

#ifdef CONFIG_CMA
void __init init_cma_reserved_pageblock(struct page *page)
{
        unsigned i = pageblock_nr_pages;
        struct page *p = page;

        do {
                __ClearPageReserved(p);
                set_page_count(p, 0);
        } while (++p, --i);

        set_page_refcounted(page);
        set_pageblock_migratetype(page, MIGRATE_CMA);
        __free_pages(page, pageblock_order);
        totalram_pages += pageblock_nr_pages;
}
#endif

The __free_pages(page, pageblock_order); call in this function eventually reaches __free_one_page(page, zone, order, migratetype);

and the pages involved are put on the MIGRATE_CMA free_list:

list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
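For reference, the buddy allocator keeps one free list per migrate type for every order, which is where the expression zone->free_area[order].free_list[migratetype] comes from. A simplified sketch of the relevant structure (the real definition lives in include/linux/mmzone.h and carries more fields; struct zone embeds an array of these, struct free_area free_area[MAX_ORDER]):

struct free_area {
        struct list_head free_list[MIGRATE_TYPES];      /* one list per migrate type */
        unsigned long    nr_free;                       /* free blocks of this order */
};

Free CMA pages therefore sit on the MIGRATE_CMA lists, from which only movable allocations may be served, so the memory stays usable by the system until a driver claims it.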

Allocating contiguous memory

Allocating contiguous memory still goes through the standard dma_alloc_coherent() and dma_alloc_writecombine() defined in arch/arm/mm/dma-mapping.c. Both of them end up calling into drivers/base/dma-contiguous.c:

struct page *dma_alloc_from_contiguous(struct device *dev, int count,
                                       unsigned int align)

->

struct page *dma_alloc_from_contiguous(struct device *dev, int count,
                                       unsigned int align)
{
        ...
        for (;;) {
                pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
                                                    start, count, mask);
                if (pageno >= cma->count) {
                        ret = -ENOMEM;
                        goto error;
                }

                pfn = cma->base_pfn + pageno;
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
                if (ret == 0) {
                        bitmap_set(cma->bitmap, pageno, count);
                        break;
                } else if (ret != -EBUSY) {
                        goto error;
                }
                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
                /* try again with a bit different memory target */
                start = pageno + mask + 1;
        }
        ...
}

->

int alloc_contig_range(unsigned long start, unsigned long end,
                       unsigned migratetype)

first needs to isolate the pages in the range; what this isolation accomplishes is explained by the comment in the code:

        /*
         * What we do here is we mark all pageblocks in range as
         * MIGRATE_ISOLATE. Because of the way page allocator work, we
         * align the range to MAX_ORDER pages so that page allocator
         * won't try to merge buddies from different pageblocks and
         * change MIGRATE_ISOLATE to some other migration type.
         *
         * Once the pageblocks are marked as MIGRATE_ISOLATE, we
         * migrate the pages from an unaligned range (ie. pages that
         * we are interested in). This will put all the pages in
         * range back to page allocator as MIGRATE_ISOLATE.
         *
         * When this is done, we take the pages in range from page
         * allocator removing them from the buddy system. This way
         * page allocator will never consider using them.
         *
         * This lets us mark the pageblocks back as
         * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
         * MAX_ORDER aligned range but not in the unaligned, original
         * range are put back to page allocator so that buddy can use
         * them.
         */
        ret = start_isolate_page_range(pfn_align_to_maxpage_down(start),
                                       pfn_align_to_maxpage_up(end),
                                       migratetype);

In short, the pages in question are marked MIGRATE_ISOLATE so that the buddy system will no longer touch them.

/*
 * start_isolate_page_range() -- make page-allocation-type of range of pages
 * to be MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && set_migratetype_isolate(page)) {
                        undo_pfn = pfn;
                        goto undo;
                }
        }
        return 0;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages)
                unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

        return -EBUSY;
}

Next, __alloc_contig_migrate_range() is called to carry out the page isolation and migration:

static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
{
        /* This function is based on compact_zone() from compaction.c. */

        unsigned long pfn = start;
        unsigned int tries = 0;
        int ret = 0;

        struct compact_control cc = {
                .nr_migratepages = 0,
                .order = -1,
                .zone = page_zone(pfn_to_page(start)),
                .sync = true,
        };
        INIT_LIST_HEAD(&cc.migratepages);

        migrate_prep_local();

        while (pfn < end || !list_empty(&cc.migratepages)) {
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                if (list_empty(&cc.migratepages)) {
                        cc.nr_migratepages = 0;
                        pfn = isolate_migratepages_range(cc.zone, &cc,
                                                         pfn, end);
                        if (!pfn) {
                                ret = -EINTR;
                                break;
                        }
                        tries = 0;
                } else if (++tries == 5) {
                        ret = ret < 0 ? ret : -EBUSY;
                        break;
                }

                ret = migrate_pages(&cc.migratepages,
                                    __alloc_contig_migrate_alloc,
                                    0, false, true);
        }

        putback_lru_pages(&cc.migratepages);
        return ret > 0 ? 0 : ret;
}

migrate_pages() performs the actual migration: it uses the __alloc_contig_migrate_alloc() callback passed in to allocate new pages, and the contents of the old pages are moved over to the new ones:

int migrate_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private, bool offlining,
                bool sync)
{
        int retry = 1;
        int nr_failed = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
        int rc;

        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;

        for (pass = 0; pass < 10 && retry; pass++) {
                retry = 0;

                list_for_each_entry_safe(page, page2, from, lru) {
                        cond_resched();

                        rc = unmap_and_move(get_new_page, private,
                                            page, pass > 2, offlining,
                                            sync);

                        switch (rc) {
                        case -ENOMEM:
                                goto out;
                        case -EAGAIN:
                                retry++;
                                break;
                        case 0:
                                break;
                        default:
                                /* Permanent failure */
                                nr_failed++;
                                break;
                        }
                }
        }
        rc = 0;
        ...
}

The key function here is unmap_and_move(), defined in mm/migrate.c:

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        struct page *page, int force, bool offlining,
                        bool sync)
{
        int rc = 0;
        int *result = NULL;
        struct page *newpage = get_new_page(page, private, &result);
        int remap_swapcache = 1;
        int charge = 0;
        struct mem_cgroup *mem = NULL;
        struct anon_vma *anon_vma = NULL;
        ...
        /* charge against new page */
        charge = mem_cgroup_prepare_migration(page, newpage, &mem);
        ...
        if (PageWriteback(page)) {
                if (!force || !sync)
                        goto uncharge;
                wait_on_page_writeback(page);
        }
        /*
         * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
         * we cannot notice that anon_vma is freed while we migrates a page.
         * This get_anon_vma() delays freeing anon_vma pointer until the end
         * of migration. File cache pages are no problem because of page_lock()
         * File Caches may use write_page() or lock_page() in migration, then,
         * just care Anon page here.
         */
        if (PageAnon(page)) {
                /*
                 * Only page_lock_anon_vma() understands the subtleties of
                 * getting a hold on an anon_vma from outside one of its mms.
                 */
                anon_vma = page_lock_anon_vma(page);
                if (anon_vma) {
                        /*
                         * Take a reference count on the anon_vma if the
                         * page is mapped so that it is guaranteed to
                         * exist when the page is remapped later
                         */
                        get_anon_vma(anon_vma);
                        page_unlock_anon_vma(anon_vma);
                } else if (PageSwapCache(page)) {
                        /*
                         * We cannot be sure that the anon_vma of an unmapped
                         * swapcache page is safe to use because we don't
                         * know in advance if the VMA that this page belonged
                         * to still exists. If the VMA and others sharing the
                         * data have been freed, then the anon_vma could
                         * already be invalid.
                         *
                         * To avoid this possibility, swapcache pages get
                         * migrated but are not remapped when migration
                         * completes
                         */
                        remap_swapcache = 0;
                } else {
                        goto uncharge;
                }
        }
        ...
        /* Establish migration ptes or remove ptes */
        try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page, remap_swapcache);

        if (rc && remap_swapcache)
                remove_migration_ptes(page, page);

        /* Drop an anon_vma reference if we took one */
        if (anon_vma)
                drop_anon_vma(anon_vma);

uncharge:
        if (!charge)
                mem_cgroup_end_migration(mem, page, newpage, rc == 0);
unlock:
        unlock_page(page);

move_newpage:
        ...
}

Through unmap_and_move(), the old pages are migrated over to the new ones.

The next step is to reclaim pages. The point of the reclaim is to make sure the system does not become starved of memory just because a chunk of contiguous memory was taken away:

->

        /*
         * Reclaim enough pages to make sure that contiguous allocation
         * will not starve the system.
         */
        __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);

->

/*
 * Trigger memory pressure bump to reclaim some pages in order to be able to
 * allocate 'count' pages in single page units. Does similar work as
 * __alloc_pages_slowpath() function.
 */
static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
{
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        struct zonelist *zonelist = node_zonelist(0, gfp_mask);
        int did_some_progress = 0;
        int order = 1;
        unsigned long watermark;

        /*
         * Increase level of watermarks to force kswapd do his job
         * to stabilise at new watermark level.
         */
        __update_cma_watermarks(zone, count);

        /* Obey watermarks as if the page was being allocated */
        watermark = low_wmark_pages(zone) + count;
        while (!zone_watermark_ok(zone, 0, watermark, 0, 0)) {
                wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));

                did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
                                                      NULL);
                if (!did_some_progress) {
                        /* Exhausted what can be done so it's blamo time */
                        out_of_memory(zonelist, gfp_mask, order, NULL);
                }
        }

        /* Restore original watermark levels. */
        __update_cma_watermarks(zone, -count);

        return count;
}

Freeing contiguous memory

Freeing the memory is straightforward:

arch/arm/mm/dma-mapping.c:

void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)

->

arch/arm/mm/dma-mapping.c:

static void __free_from_contiguous(struct device *dev, struct page *page,
                                   size_t size)
{
        __dma_remap(page, size, pgprot_kernel);
        dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

->

bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                 int count)
{
        ...
        free_contig_range(pfn, count);
        ...
}

->

void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
        for (; nr_pages--; ++pfn)
                __free_page(pfn_to_page(pfn));
}

which hands the pages back to the buddy allocator.

The migratetype of kernel memory allocations

When the kernel allocates memory it passes GFP_ flags, and these GFP_ flags can be converted into a migratetype:

static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
        WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

        if (unlikely(page_group_by_mobility_disabled))
                return MIGRATE_UNMOVABLE;

        /* Group based on mobility */
        return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
                ((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
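For example, GFP_KERNEL sets neither __GFP_MOVABLE nor __GFP_RECLAIMABLE and therefore maps to MIGRATE_UNMOVABLE, whereas GFP_HIGHUSER_MOVABLE contains __GFP_MOVABLE and maps to MIGRATE_MOVABLE; only the latter kind of allocation is allowed to fall back to MIGRATE_CMA pageblocks, which is why the CMA area can be lent out safely and migrated away from later.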

Later, when memory is actually allocated, the free_list matching this migrate type is searched:

        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
                        zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
                        preferred_zone, migratetype);

In addition, the author has written a small test module that can be used to exercise CMA at any time:

/*
 * kernel module helper for testing CMA
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/dma-mapping.h>

#define CMA_NUM  10
static struct device *cma_dev;
static dma_addr_t dma_phys[CMA_NUM];
static void *dma_virt[CMA_NUM];

/* any read request will free coherent memory, eg.
 * cat /dev/cma_test
 */
static ssize_t
cma_test_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        int i;

        for (i = 0; i < CMA_NUM; i++) {
                if (dma_virt[i]) {
                        dma_free_coherent(cma_dev, (i + 1) * SZ_1M, dma_virt[i], dma_phys[i]);
                        _dev_info(cma_dev, "free virt: %p phys: %p\n", dma_virt[i], (void *)dma_phys[i]);
                        dma_virt[i] = NULL;
                        break;
                }
        }
        return 0;
}

/*
 * any write request will alloc coherent memory, eg.
 * echo 0 > /dev/cma_test
 */
static ssize_t
cma_test_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
        int i;
        int ret;

        for (i = 0; i < CMA_NUM; i++) {
                if (!dma_virt[i]) {
                        dma_virt[i] = dma_alloc_coherent(cma_dev, (i + 1) * SZ_1M, &dma_phys[i], GFP_KERNEL);

                        if (dma_virt[i]) {
                                void *p;
                                /* touch every page in the allocated memory */
                                for (p = dma_virt[i]; p < dma_virt[i] + (i + 1) * SZ_1M; p += PAGE_SIZE)
                                        *(u32 *)p = 0;

                                _dev_info(cma_dev, "alloc virt: %p phys: %p\n", dma_virt[i], (void *)dma_phys[i]);
                        } else {
                                dev_err(cma_dev, "no mem in CMA area\n");
                                ret = -ENOMEM;
                        }
                        break;
                }
        }

        return count;
}

static const struct file_operations cma_test_fops = {
        .owner = THIS_MODULE,
        .read  = cma_test_read,
        .write = cma_test_write,
};

static struct miscdevice cma_test_misc = {
        .name = "cma_test",
        .fops = &cma_test_fops,
};

static int __init cma_test_init(void)
{
        int ret = 0;

        ret = misc_register(&cma_test_misc);
        if (unlikely(ret)) {
                pr_err("failed to register cma test misc device!\n");
                return ret;
        }
        cma_dev = cma_test_misc.this_device;
        cma_dev->coherent_dma_mask = ~0;
        _dev_info(cma_dev, "registered.\n");

        return ret;
}
module_init(cma_test_init);

static void __exit cma_test_exit(void)
{
        misc_deregister(&cma_test_misc);
}
module_exit(cma_test_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("kernel module to help the test of CMA");
MODULE_ALIAS("CMA test");

To allocate memory:

# echo 0 > /dev/cma_test

To free memory:

# cat /dev/cma_test


